* cp-tree.def (TINST_LEVEL): Make it an 'x' node.
[official-gcc.git] / libmudflap / mf-runtime.c
blob486880cf3ce6d0b093eda21ce8c2651c8d7d85b2
1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
20 executable.)
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 for more details.
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
30 02111-1307, USA. */
32 #include "config.h"
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
37 #define _POSIX_SOURCE
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
39 #define _GNU_SOURCE
40 #define _XOPEN_SOURCE
41 #define _BSD_TYPES
42 #define __EXTENSIONS__
43 #define _ALL_SOURCE
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <sys/types.h>
50 #include <sys/time.h>
51 #include <time.h>
52 #include <unistd.h>
53 #ifdef HAVE_EXECINFO_H
54 #include <execinfo.h>
55 #endif
56 #ifdef HAVE_SIGNAL_H
57 #include <signal.h>
58 #endif
59 #include <assert.h>
61 #include <string.h>
62 #include <limits.h>
63 #include <sys/types.h>
64 #include <signal.h>
65 #include <errno.h>
66 #include <ctype.h>
68 #include "mf-runtime.h"
69 #include "mf-impl.h"
70 #include "splay-tree.h"
73 /* ------------------------------------------------------------------------ */
74 /* Utility macros */
76 #define CTOR __attribute__ ((constructor))
77 #define DTOR __attribute__ ((destructor))
80 /* Codes to describe the context in which a violation occurs. */
81 #define __MF_VIOL_UNKNOWN 0
82 #define __MF_VIOL_READ 1
83 #define __MF_VIOL_WRITE 2
84 #define __MF_VIOL_REGISTER 3
85 #define __MF_VIOL_UNREGISTER 4
86 #define __MF_VIOL_WATCH 5
88 /* Protect against recursive calls. */
89 #define BEGIN_RECURSION_PROTECT() do { \
90 if (UNLIKELY (__mf_state == reentrant)) { \
91 write (2, "mf: erroneous reentrancy detected in `", 38); \
92 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
93 write (2, "'\n", 2); \
94 abort (); } \
95 __mf_state = reentrant; \
96 } while (0)
98 #define END_RECURSION_PROTECT() do { \
99 __mf_state = active; \
100 } while (0)
104 /* ------------------------------------------------------------------------ */
105 /* Required globals. */
107 #define LOOKUP_CACHE_MASK_DFL 1023
108 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
109 #define LOOKUP_CACHE_SHIFT_DFL 2
111 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
112 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
113 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
114 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
116 struct __mf_options __mf_opts;
118 int __mf_starting_p = 1;
119 #ifndef LIBMUDFLAPTH
120 enum __mf_state_enum __mf_state = active;
121 #else
122 /* See __mf_state_perthread() in mf-hooks.c. */
123 #endif
126 #ifdef LIBMUDFLAPTH
127 pthread_mutex_t __mf_biglock =
128 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
129 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
130 #else
131 PTHREAD_MUTEX_INITIALIZER;
132 #endif
133 #endif
135 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
136 the libmudflap.la (no threading support) can diagnose whether
137 the application is linked with -lpthread. See __mf_usage() below. */
138 #if HAVE_PTHREAD_H
139 #ifdef _POSIX_THREADS
140 #pragma weak pthread_join
141 #else
142 #define pthread_join NULL
143 #endif
144 const void *threads_active_p = (void *) pthread_join;
145 #endif
148 /* ------------------------------------------------------------------------ */
149 /* stats-related globals. */
151 static unsigned long __mf_count_check;
152 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
153 static unsigned long __mf_count_register;
154 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
155 static unsigned long __mf_count_unregister;
156 static unsigned long __mf_total_unregister_size;
157 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
158 static unsigned long __mf_sigusr1_received;
159 static unsigned long __mf_sigusr1_handled;
160 /* not static */ unsigned long __mf_reentrancy;
161 #ifdef LIBMUDFLAPTH
162 /* not static */ unsigned long __mf_lock_contention;
163 #endif
166 /* ------------------------------------------------------------------------ */
167 /* mode-check-related globals. */
/* Metadata record for one registered memory region ("object").
   Live objects are kept in per-type splay trees keyed on .low (see
   __mf_object_tree); unregistered ones may persist for a while in the
   __mf_object_cemetary arrays when persistent_count > 0.  */
169 typedef struct __mf_object
171   uintptr_t low, high; /* __mf_register parameters */
172   const char *name;
173   char type; /* __MF_TYPE_something */
174   char watching_p; /* Trigger a VIOL_WATCH on access? */
175   unsigned read_count; /* Number of times __mf_check/read was called on this object. */
176   unsigned write_count; /* Likewise for __mf_check/write. */
177   unsigned liveness; /* A measure of recent checking activity. */
178   unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
/* Allocation context: where/when this object was registered.  */
180   uintptr_t alloc_pc;
181   struct timeval alloc_time;
182   char **alloc_backtrace;
183   size_t alloc_backtrace_size;
184 #ifdef LIBMUDFLAPTH
185   pthread_t alloc_thread;
186 #endif
/* Deallocation context: filled in by __mfu_unregister when the
   object is moved to the cemetary.  */
188   int deallocated_p;
189   uintptr_t dealloc_pc;
190   struct timeval dealloc_time;
191   char **dealloc_backtrace;
192   size_t dealloc_backtrace_size;
193 #ifdef LIBMUDFLAPTH
194   pthread_t dealloc_thread;
195 #endif
196 } __mf_object_t;
198 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
199 /* Actually stored as static vars within lookup function below. */
201 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
202 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
203 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
206 /* ------------------------------------------------------------------------ */
207 /* Forward function declarations */
209 void __mf_init () CTOR;
210 static void __mf_sigusr1_respond ();
211 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
212 __mf_object_t **objs, unsigned max_objs);
213 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
214 __mf_object_t **objs, unsigned max_objs, int type);
215 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
216 __mf_object_t **objs, unsigned max_objs);
217 static void __mf_adapt_cache ();
218 static void __mf_describe_object (__mf_object_t *obj);
219 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
220 static splay_tree __mf_object_tree (int type);
221 static void __mf_link_object (__mf_object_t *node);
222 static void __mf_unlink_object (__mf_object_t *node);
225 /* ------------------------------------------------------------------------ */
226 /* Configuration engine */
228 static void
229 __mf_set_default_options ()
231 memset (& __mf_opts, 0, sizeof (__mf_opts));
233 __mf_opts.adapt_cache = 1000003;
234 __mf_opts.abbreviate = 1;
235 __mf_opts.verbose_violations = 1;
236 __mf_opts.free_queue_length = 4;
237 __mf_opts.persistent_count = 100;
238 __mf_opts.crumple_zone = 32;
239 __mf_opts.backtrace = 4;
240 __mf_opts.timestamps = 1;
241 __mf_opts.mudflap_mode = mode_check;
242 __mf_opts.violation_mode = viol_nop;
243 __mf_opts.heur_std_data = 1;
244 #ifdef LIBMUDFLAPTH
245 __mf_opts.thread_stack = 0;
246 #endif
/* Table mapping MUDFLAP_OPTIONS tokens to __mf_opts fields.  Scanned
   by __mfu_set_options and printed by __mf_usage; terminated by a
   null-name sentinel entry.  set_option stores .value into .target;
   read_integer_option parses a trailing "=N".  */
249 static struct option
251   char *name;
252   char *description;
253   enum
255       set_option,
256       read_integer_option,
257     } type;
258   int value;
259   int *target;
261 options [] =
263     {"mode-nop",
264      "mudflaps do nothing",
265      set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
266     {"mode-populate",
267      "mudflaps populate object tree",
268      set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
269     {"mode-check",
270      "mudflaps check for memory violations",
271      set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
272     {"mode-violate",
273      "mudflaps always cause violations (diagnostic)",
274      set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
276     {"viol-nop",
277      "violations do not change program execution",
278      set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
279     {"viol-abort",
280      "violations cause a call to abort()",
281      set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
282     {"viol-segv",
283      "violations are promoted to SIGSEGV signals",
284      set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
285     {"viol-gdb",
286      "violations fork a gdb process attached to current program",
287      set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
288     {"trace-calls",
289      "trace calls to mudflap runtime library",
290      set_option, 1, &__mf_opts.trace_mf_calls},
291     {"verbose-trace",
292      "trace internal events within mudflap runtime library",
293      set_option, 1, &__mf_opts.verbose_trace},
294     {"collect-stats",
295      "collect statistics on mudflap's operation",
296      set_option, 1, &__mf_opts.collect_stats},
297 #ifdef SIGUSR1
298     {"sigusr1-report",
299      "print report upon SIGUSR1",
300      set_option, 1, &__mf_opts.sigusr1_report},
301 #endif
302     {"internal-checking",
303      "perform more expensive internal checking",
304      set_option, 1, &__mf_opts.internal_checking},
305     {"print-leaks",
306      "print any memory leaks at program shutdown",
307      set_option, 1, &__mf_opts.print_leaks},
308     {"check-initialization",
309      "detect uninitialized object reads",
310      set_option, 1, &__mf_opts.check_initialization},
311     {"verbose-violations",
312      "print verbose messages when memory violations occur",
313      set_option, 1, &__mf_opts.verbose_violations},
314     {"abbreviate",
315      "abbreviate repetitive listings",
316      set_option, 1, &__mf_opts.abbreviate},
317     {"timestamps",
318      "track object lifetime timestamps",
319      set_option, 1, &__mf_opts.timestamps},
320     {"ignore-reads",
321      "ignore read accesses - assume okay",
322      set_option, 1, &__mf_opts.ignore_reads},
323     {"wipe-stack",
324      "wipe stack objects at unwind",
325      set_option, 1, &__mf_opts.wipe_stack},
326     {"wipe-heap",
327      "wipe heap objects at free",
328      set_option, 1, &__mf_opts.wipe_heap},
329     {"heur-proc-map",
330      "support /proc/self/map heuristics",
331      set_option, 1, &__mf_opts.heur_proc_map},
332     {"heur-stack-bound",
333      "enable a simple upper stack bound heuristic",
334      set_option, 1, &__mf_opts.heur_stack_bound},
335     {"heur-start-end",
336      "support _start.._end heuristics",
337      set_option, 1, &__mf_opts.heur_start_end},
338     {"heur-stdlib",
339      "register standard library data (argv, errno, stdin, ...)",
340      set_option, 1, &__mf_opts.heur_std_data},
341     {"free-queue-length",
342      "queue N deferred free() calls before performing them",
343      read_integer_option, 0, &__mf_opts.free_queue_length},
344     {"persistent-count",
345      "keep a history of N unregistered regions",
346      read_integer_option, 0, &__mf_opts.persistent_count},
347     {"crumple-zone",
348      "surround allocations with crumple zones of N bytes",
349      read_integer_option, 0, &__mf_opts.crumple_zone},
350     /* XXX: not type-safe.
351     {"lc-mask",
352      "set lookup cache size mask to N (2**M - 1)",
353      read_integer_option, 0, (int *)(&__mf_lc_mask)},
354     {"lc-shift",
355      "set lookup cache pointer shift",
356      read_integer_option, 0, (int *)(&__mf_lc_shift)},
*/ /* NOTE(review): this comment terminator (upstream line 357) appears to
      have been lost in extraction; without it the entries below, through
      the table's sentinel, would be swallowed by the comment.  */
358     {"lc-adapt",
359      "adapt mask/shift parameters after N cache misses",
360      read_integer_option, 1, &__mf_opts.adapt_cache},
361     {"backtrace",
362      "keep an N-level stack trace of each call context",
363      read_integer_option, 0, &__mf_opts.backtrace},
364 #ifdef LIBMUDFLAPTH
365     {"thread-stack",
366      "override thread stacks allocation: N kB",
367      read_integer_option, 0, &__mf_opts.thread_stack},
368 #endif
369     {0, 0, set_option, 0, NULL}
372 static void
373 __mf_usage ()
375 struct option *opt;
377 fprintf (stderr,
378 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
379 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
380 "\n"
381 "The mudflap code can be controlled by an environment variable:\n"
382 "\n"
383 "$ export MUDFLAP_OPTIONS='<options>'\n"
384 "$ <mudflapped_program>\n"
385 "\n"
386 "where <options> is a space-separated list of \n"
387 "any of the following options. Use `-no-OPTION' to disable options.\n"
388 "\n",
389 #if HAVE_PTHREAD_H
390 (threads_active_p ? "multi-threaded " : "single-threaded "),
391 #else
393 #endif
394 #if LIBMUDFLAPTH
395 "thread-aware "
396 #else
397 "thread-unaware "
398 #endif
400 /* XXX: The multi-threaded thread-unaware combination is bad. */
402 for (opt = options; opt->name; opt++)
404 int default_p = (opt->value == * opt->target);
406 switch (opt->type)
408 char buf[128];
409 case set_option:
410 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
411 if (default_p)
412 fprintf (stderr, " [active]\n");
413 else
414 fprintf (stderr, "\n");
415 break;
416 case read_integer_option:
417 strncpy (buf, opt->name, 128);
418 strncpy (buf + strlen (opt->name), "=N", 2);
419 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
420 fprintf (stderr, " [%d]\n", * opt->target);
421 break;
422 default: abort();
426 fprintf (stderr, "\n");
/* Thread-safe entry point for option parsing: take the big lock,
   guard against runtime reentrancy, and delegate to __mfu_set_options.
   Returns its result (0 ok, -1 error/help).  */
int
__mf_set_options (const char *optstr)
{
  int result;

  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  result = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();

  return result;
}
/* Parse OPTSTR (the MUDFLAP_OPTIONS string) against the options[]
   table and update __mf_opts in place.  Returns 0 on success, -1 on
   an unrecognized token or when help was requested (the caller then
   prints usage).  This is the unlocked variant; __mf_set_options is
   the locking wrapper.  */
446 int
447 __mfu_set_options (const char *optstr)
449   struct option *opts = 0;
450   char *nxt = 0;
451   long tmp = 0;
452   int rc = 0;
453   const char *saved_optstr = optstr;
455   /* XXX: bounds-check for optstr! */
457   while (*optstr)
459     switch (*optstr) {
460       case ' ':
461       case '\t':
462       case '\n':
463         optstr++;
464         break;
466       case '-':
/* NOTE(review): '*optstr+1' parses as (*optstr)+1, which is always
   nonzero here since *optstr == '-'; presumably *(optstr+1) (peek at
   the next character) was intended — confirm against upstream.  */
467         if (*optstr+1)
469             int negate = 0;
470             optstr++;
472             if (*optstr == '?' ||
473                 strncmp (optstr, "help", 4) == 0)
475                 /* Caller will print help and exit. */
476                 return -1;
479             if (strncmp (optstr, "no-", 3) == 0)
481                 negate = 1;
482                 optstr = & optstr[3];
485             for (opts = options; opts->name; opts++)
487                 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
489                     optstr += strlen (opts->name);
490                     assert (opts->target);
491                     switch (opts->type)
493                       case set_option:
494                         if (negate)
495                           *(opts->target) = 0;
496                         else
497                           *(opts->target) = opts->value;
498                         break;
499                       case read_integer_option:
500                         if (! negate && (*optstr == '=' && *(optstr+1)))
502                             optstr++;
/* NOTE(review): only LONG_MAX saturation is screened below; LONG_MIN
   underflow and errno from strtol are not checked.  */
503                             tmp = strtol (optstr, &nxt, 10);
504                             if ((optstr != nxt) && (tmp != LONG_MAX))
506                                 optstr = nxt;
507                                 *(opts->target) = (int)tmp;
510                         else if (negate)
511                           * opts->target = 0;
512                         break;
517         break;
519       default:
520         fprintf (stderr,
521                  "warning: unrecognized string '%s' in mudflap options\n",
522                  optstr);
523         optstr += strlen (optstr);
524         rc = -1;
525         break;
529   /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
530   __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
531   __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
533   /* Clear the lookup cache, in case the parameters got changed. */
534   /* XXX: race */
535   memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
536   /* void slot 0 */
537   __mf_lookup_cache[0].low = MAXPTR;
539   TRACE ("set options from `%s'\n", saved_optstr);
541   /* Call this unconditionally, in case -sigusr1-report was toggled. */
542   __mf_sigusr1_respond ();
544   return rc;
548 #ifdef PIC
/* Resolve one wrapped libc symbol through the next object in the
   link map (RTLD_NEXT), preferring a versioned lookup via dlvsym when
   the entry carries a version string and dlvsym is available.
   Aborts on any lookup failure; no-op when already resolved.  */
550 void
551 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
553   char *err;
555   assert (e);
556   if (e->pointer) return;
558 #if HAVE_DLVSYM
559   if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
560     e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
561   else
562 #endif
563     e->pointer = dlsym (RTLD_NEXT, e->name);
/* NOTE(review): dlerror() is not cleared before the lookup; a stale
   error from an earlier, unread failure would be misattributed here —
   presumably the state is assumed clean.  Confirm.  */
565   err = dlerror ();
567   if (err)
569       fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
570                e->name, err);
571       abort ();
573   if (! e->pointer)
575       fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
576       abort ();
581 static void
582 __mf_resolve_dynamics ()
584 int i;
585 for (i = 0; i < dyn_INITRESOLVE; i++)
586 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
/* Table of real libc/libpthread entry points, resolved lazily by
   __mf_resolve_single_dynamic and consumed through CALL_REAL.  */
590 /* NB: order must match enums in mf-impl.h */
591 struct __mf_dynamic_entry __mf_dynamic [] =
593     {NULL, "calloc", NULL},
594     {NULL, "free", NULL},
595     {NULL, "malloc", NULL},
596     {NULL, "mmap", NULL},
597     {NULL, "munmap", NULL},
598     {NULL, "realloc", NULL},
599     {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
600 #ifdef LIBMUDFLAPTH
601     {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
602     {NULL, "pthread_join", NULL},
603     {NULL, "pthread_exit", NULL}
604 #endif
607 #endif /* PIC */
611 /* ------------------------------------------------------------------------ */
613 /* Lookup & manage automatic initialization of the five or so splay trees. */
614 static splay_tree
615 __mf_object_tree (int type)
617 static splay_tree trees [__MF_TYPE_MAX+1];
618 assert (type >= 0 && type <= __MF_TYPE_MAX);
619 if (UNLIKELY (trees[type] == NULL))
620 trees[type] = splay_tree_new ();
621 return trees[type];
/* Runtime constructor (CTOR): resolve wrapped symbols (PIC builds),
   install default options, apply MUDFLAP_OPTIONS from the
   environment (printing usage and exiting on a bad string), then
   register the runtime's own reserved regions and the *NULL byte as
   NOACCESS.  Idempotent via the __mf_starting_p flag.  */
625 /* not static */void
626 __mf_init ()
628   char *ov = 0;
630   /* Return if initialization has already been done. */
631   if (LIKELY (__mf_starting_p == 0))
632     return;
634   /* This initial bootstrap phase requires that __mf_starting_p = 1. */
635 #ifdef PIC
636   __mf_resolve_dynamics ();
637 #endif
638   __mf_starting_p = 0;
640   __mf_set_default_options ();
642   ov = getenv ("MUDFLAP_OPTIONS");
643   if (ov)
645       int rc = __mfu_set_options (ov);
646       if (rc < 0)
648           __mf_usage ();
649           exit (1);
653   /* Initialize to a non-zero description epoch. */
654   __mf_describe_object (NULL);
656 #define REG_RESERVED(obj) \
657   __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
659   REG_RESERVED (__mf_lookup_cache);
660   REG_RESERVED (__mf_lc_mask);
661   REG_RESERVED (__mf_lc_shift);
662   /* XXX: others of our statics? */
664   /* Prevent access to *NULL. */
665   __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
666   __mf_lookup_cache[0].low = (uintptr_t) -1;
/* Interposed main(): on first invocation, when the heur-stdlib option
   is on, register argv, every argv element, environ, every environ
   element, errno and the three standard FILE streams as STATIC
   objects, then tail-call the real main.  */
672 __wrap_main (int argc, char* argv[])
674   extern char **environ;
675   extern int main ();
676   static int been_here = 0;
678   if (__mf_opts.heur_std_data && ! been_here)
680       unsigned i;
682       been_here = 1;
683       __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
/* NOTE(review): 'i' is unsigned while argc is int; the comparison
   promotes argc — harmless for sane argc values.  */
684       for (i=0; i<argc; i++)
686           unsigned j = strlen (argv[i]);
687           __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
690       for (i=0; ; i++)
692           char *e = environ[i];
693           unsigned j;
694           if (e == NULL) break;
695           j = strlen (environ[i]);
696           __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
698       __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
700       __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
702       __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
703       __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
704       __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
706       /* Make some effort to register ctype.h static arrays. */
707       /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
708       /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
709       with in mf-hooks2.c. */
712 #ifdef PIC
713   return main (argc, argv, environ);
714 #else
715   return __real_main (argc, argv, environ);
716 #endif
721 extern void __mf_fini () DTOR;
722 void __mf_fini ()
724 TRACE ("__mf_fini\n");
725 __mfu_report ();
730 /* ------------------------------------------------------------------------ */
731 /* __mf_check */
733 void __mf_check (void *ptr, size_t sz, int type, const char *location)
735 LOCKTH ();
736 BEGIN_RECURSION_PROTECT ();
737 __mfu_check (ptr, sz, type, location);
738 END_RECURSION_PROTECT ();
739 UNLOCKTH ();
/* Unlocked core of __mf_check: judge the access [ptr, ptr+sz) of the
   given TYPE (__MF_CHECK_READ or write) under the current mudflap
   mode, update the lookup cache entry for ptr, and raise a violation
   when the judgement goes negative (-1 => read/write violation,
   -2 => watchpoint hit).  */
743 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
745   unsigned entry_idx = __MF_CACHE_INDEX (ptr);
746   struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
747   int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
748   uintptr_t ptr_low = (uintptr_t) ptr;
749   uintptr_t ptr_high = CLAMPSZ (ptr, sz);
750   struct __mf_cache old_entry = *entry;
752   if (UNLIKELY (__mf_opts.sigusr1_report))
753     __mf_sigusr1_respond ();
755   TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
756          ptr, entry_idx, (unsigned long)sz,
757          (type == 0 ? "read" : "write"), location);
759   switch (__mf_opts.mudflap_mode)
/* mode_nop: approve everything and poison the cache entry so future
   hits short-circuit.  */
761     case mode_nop:
762       entry->low = MINPTR;
763       entry->high = MAXPTR;
764       judgement = 1;
765       break;
/* mode_populate: approve and cache exactly the checked range.  */
767     case mode_populate:
768       entry->low = ptr_low;
769       entry->high = ptr_high;
770       judgement = 1;
771       break;
773     case mode_check:
775       unsigned heuristics = 0;
777       /* Advance aging/adaptation counters. */
778       static unsigned adapt_count;
779       adapt_count ++;
780       if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
781                     adapt_count > __mf_opts.adapt_cache))
783           adapt_count = 0;
784           __mf_adapt_cache ();
787       /* Looping only occurs if heuristics were triggered. */
788       while (judgement == 0)
790           DECLARE (void, free, void *p);
791           __mf_object_t* ovr_obj[1];
792           unsigned obj_count;
793           __mf_object_t** all_ovr_obj = NULL;
794           __mf_object_t** dealloc_me = NULL;
795           unsigned i;
797           /* Find all overlapping objects. Be optimistic that there is just one. */
798           obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
799           if (UNLIKELY (obj_count > 1))
801               /* Allocate a real buffer and do the search again. */
802               DECLARE (void *, malloc, size_t c);
803               unsigned n;
804               all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
805                                                 obj_count));
806               if (all_ovr_obj == NULL) abort ();
807               n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
808               assert (n == obj_count);
809               dealloc_me = all_ovr_obj;
811           else
813               all_ovr_obj = ovr_obj;
814               dealloc_me = NULL;
817           /* Update object statistics. */
818           for (i = 0; i < obj_count; i++)
820               __mf_object_t *obj = all_ovr_obj[i];
821               assert (obj != NULL);
822               if (type == __MF_CHECK_READ)
823                 obj->read_count ++;
824               else
825                 obj->write_count ++;
826               obj->liveness ++;
829           /* Iterate over the various objects. There are a number of special cases. */
830           for (i = 0; i < obj_count; i++)
832               __mf_object_t *obj = all_ovr_obj[i];
834               /* Any __MF_TYPE_NOACCESS hit is bad. */
835               if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
836                 judgement = -1;
838               /* Any object with a watch flag is bad. */
839               if (UNLIKELY (obj->watching_p))
840                 judgement = -2; /* trigger VIOL_WATCH */
842               /* A read from an uninitialized object is bad. */
843               if (UNLIKELY (__mf_opts.check_initialization
844                             /* reading */
845                             && type == __MF_CHECK_READ
846                             /* not written */
847                             && obj->write_count == 0
848                             /* uninitialized (heap) */
849                             && obj->type == __MF_TYPE_HEAP))
850                 judgement = -1;
853           /* We now know that the access spans one or more valid objects. */
854           if (LIKELY (judgement >= 0))
855             for (i = 0; i < obj_count; i++)
857                 __mf_object_t *obj = all_ovr_obj[i];
859                 /* Is this access entirely contained within this object? */
860                 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
862                     /* Valid access. */
863                     entry->low = obj->low;
864                     entry->high = obj->high;
865                     judgement = 1;
868                 /* XXX: Access runs off left or right side of this
869                    object. That could be okay, if there are
870                    other objects that fill in all the holes. */
873           if (dealloc_me != NULL)
874             CALL_REAL (free, dealloc_me);
876           /* If the judgment is still unknown at this stage, loop
877              around at most one more time. */
878           if (judgement == 0)
880               if (heuristics++ < 2) /* XXX parametrize this number? */
881                 judgement = __mf_heuristic_check (ptr_low, ptr_high);
882               else
883                 judgement = -1;
888       break;
890     case mode_violate:
891       judgement = -1;
892       break;
895   if (__mf_opts.collect_stats)
897       __mf_count_check ++;
899       if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
900         /* && (old_entry.low != 0) && (old_entry.high != 0)) */
901         __mf_lookup_cache_reusecount [entry_idx] ++;
904   if (UNLIKELY (judgement < 0))
905     __mf_violation (ptr, sz,
906                     (uintptr_t) __builtin_return_address (0), location,
907                     ((judgement == -1) ?
908                      (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
909                      __MF_VIOL_WATCH));
913 static __mf_object_t *
914 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
915 const char *name, uintptr_t pc)
917 DECLARE (void *, calloc, size_t c, size_t n);
919 __mf_object_t *new_obj;
920 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
921 new_obj->low = low;
922 new_obj->high = high;
923 new_obj->type = type;
924 new_obj->name = name;
925 new_obj->alloc_pc = pc;
926 #if HAVE_GETTIMEOFDAY
927 if (__mf_opts.timestamps)
928 gettimeofday (& new_obj->alloc_time, NULL);
929 #endif
930 #if LIBMUDFLAPTH
931 new_obj->alloc_thread = pthread_self ();
932 #endif
934 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
935 new_obj->alloc_backtrace_size =
936 __mf_backtrace (& new_obj->alloc_backtrace,
937 (void *) pc, 2);
939 __mf_link_object (new_obj);
940 return new_obj;
/* Evict any lookup-cache entries that refer to OLD_OBJ, so stale
   approvals do not outlive the object's unregistration.  */
944 static void
945 __mf_uncache_object (__mf_object_t *old_obj)
947   /* Remove any low/high pointers for this object from the lookup cache. */
949   /* Can it possibly exist in the cache? */
/* Only checked objects can have been cached (cache fills happen in
   __mfu_check, which also bumps the counters).  */
950   if (LIKELY (old_obj->read_count + old_obj->write_count))
952       uintptr_t low = old_obj->low;
953       uintptr_t high = old_obj->high;
954       unsigned idx_low = __MF_CACHE_INDEX (low);
955       unsigned idx_high = __MF_CACHE_INDEX (high);
956       unsigned i;
/* NOTE(review): if __MF_CACHE_INDEX wraps so that idx_high < idx_low,
   this loop body never runs — presumably the index is monotone over
   [low, high]; confirm against __MF_CACHE_INDEX's definition.  */
957       for (i = idx_low; i <= idx_high; i++)
959           struct __mf_cache *entry = & __mf_lookup_cache [i];
960           /* NB: the "||" in the following test permits this code to
961              tolerate the situation introduced by __mf_check over
962              contiguous objects, where a cache entry spans several
963              objects. */
964           if (entry->low == low || entry->high == high)
966               entry->low = MAXPTR;
967               entry->high = MINPTR;
974 void
975 __mf_register (void *ptr, size_t sz, int type, const char *name)
977 LOCKTH ();
978 BEGIN_RECURSION_PROTECT ();
979 __mfu_register (ptr, sz, type, name);
980 END_RECURSION_PROTECT ();
981 UNLOCKTH ();
/* Unlocked core of __mf_register: record the region [ptr, ptr+sz) of
   TYPE under NAME.  Exact duplicates of STATIC/GUESS registrations
   are tolerated (distinct compilation units may re-register the same
   static); any other same-type overlap is a violation.  */
985 void
986 __mfu_register (void *ptr, size_t sz, int type, const char *name)
988   TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
989          ptr, (unsigned long) sz, type, name ? name : "");
991   if (__mf_opts.collect_stats)
993       __mf_count_register ++;
/* Out-of-range types are binned into slot 0 of the size histogram.  */
994       __mf_total_register_size [(type < 0) ? 0 :
995                                 (type > __MF_TYPE_MAX) ? 0 :
996                                 type] += sz;
999   if (UNLIKELY (__mf_opts.sigusr1_report))
1000     __mf_sigusr1_respond ();
1002   switch (__mf_opts.mudflap_mode)
1004     case mode_nop:
1005       break;
1007     case mode_violate:
1008       __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1009                       __MF_VIOL_REGISTER);
1010       break;
1012     case mode_populate:
1013       /* Clear the cache. */
1014       /* XXX: why the entire cache? */
1015       /* XXX: race */
1016       memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1017       /* void slot 0 */
1018       __mf_lookup_cache[0].low = MAXPTR;
1019       break;
1021     case mode_check:
1023       __mf_object_t *ovr_objs [1];
1024       unsigned num_overlapping_objs;
1025       uintptr_t low = (uintptr_t) ptr;
1026       uintptr_t high = CLAMPSZ (ptr, sz);
1027       uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1029       /* Treat unknown size indication as 1. */
/* NOTE(review): sz is normalized to 1 only after 'high' above was
   computed with the original sz == 0 — confirm CLAMPSZ's behavior for
   a zero size is the intended one.  */
1030       if (UNLIKELY (sz == 0)) sz = 1;
1032       /* Look for objects only of the same type. This will e.g. permit a registration
1033       of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1034       __mf_check time however harmful overlaps will be detected. */
1035       num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1037       /* Handle overlaps. */
1038       if (UNLIKELY (num_overlapping_objs > 0))
1040           __mf_object_t *ovr_obj = ovr_objs[0];
1042           /* Accept certain specific duplication pairs. */
1043           if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1044               && ovr_obj->low == low
1045               && ovr_obj->high == high
1046               && ovr_obj->type == type)
1048               /* Duplicate registration for static objects may come
1049                  from distinct compilation units. */
1050               VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1051                              (void *) low, (void *) high,
1052                              (ovr_obj->name ? ovr_obj->name : ""));
1053               break;
1056           /* Alas, a genuine violation. */
1057           else
1059               /* Two or more *real* mappings here. */
1060               __mf_violation ((void *) ptr, sz,
1061                               (uintptr_t) __builtin_return_address (0), NULL,
1062                               __MF_VIOL_REGISTER);
1065       else /* No overlapping objects: AOK. */
1066         __mf_insert_new_object (low, high, type, name, pc);
1068       /* We could conceivably call __mf_check() here to prime the cache,
1069       but then the read_count/write_count field is not reliable. */
1070       break;
1072     } /* end switch (__mf_opts.mudflap_mode) */
1076 void
1077 __mf_unregister (void *ptr, size_t sz, int type)
1079 LOCKTH ();
1080 BEGIN_RECURSION_PROTECT ();
1081 __mfu_unregister (ptr, sz, type);
1082 END_RECURSION_PROTECT ();
1083 UNLOCKTH ();
1087 void
/* Core of region unregistration: remove [ptr, ptr+sz) of the given
   TYPE from the object database.  The "__mfu_" prefix follows this
   file's convention (cf. __mf_report / __mfu_report) of an unlocked
   worker -- presumably called with the mudflap lock and recursion
   protection already held by the __mf_unregister wrapper; confirm at
   the caller.  */
1088 __mfu_unregister (void *ptr, size_t sz, int type)
1090 DECLARE (void, free, void *ptr);
/* Report on a pending SIGUSR1 before mutating the database.  */
1092 if (UNLIKELY (__mf_opts.sigusr1_report))
1093 __mf_sigusr1_respond ();
1095 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1097 switch (__mf_opts.mudflap_mode)
1099 case mode_nop:
1100 break;
/* In violate mode every unregister is itself reported as a violation.  */
1102 case mode_violate:
1103 __mf_violation (ptr, sz,
1104 (uintptr_t) __builtin_return_address (0), NULL,
1105 __MF_VIOL_UNREGISTER);
1106 break;
1108 case mode_populate:
1109 /* Clear the cache. */
1110 /* XXX: race */
1111 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1112 /* void slot 0 */
1113 __mf_lookup_cache[0].low = MAXPTR;
1114 break;
1116 case mode_check:
1118 __mf_object_t *old_obj = NULL;
1119 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1120 __mf_object_t *objs[1] = {NULL};
1121 unsigned num_overlapping_objs;
/* Locate the (hopefully unique) object covering this region.  */
1123 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1124 CLAMPSZ (ptr, sz), objs, 1, type);
1126 /* Special case for HEAP_I - see free & realloc hook. They don't
1127 know whether the input region was HEAP or HEAP_I before
1128 unmapping it. Here we give HEAP a try in case HEAP_I
1129 failed. */
1130 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1132 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1133 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1136 old_obj = objs[0];
/* Reject unregistration unless it matches exactly one object with
   the same base and (when sz != 0) the same size.  */
1137 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1138 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1139 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1141 __mf_violation (ptr, sz,
1142 (uintptr_t) __builtin_return_address (0), NULL,
1143 __MF_VIOL_UNREGISTER);
1144 break;
/* Detach the object from the database and the lookup cache.  */
1147 __mf_unlink_object (old_obj);
1148 __mf_uncache_object (old_obj);
1150 /* Wipe buffer contents if desired. */
1151 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1152 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1153 || old_obj->type == __MF_TYPE_HEAP_I)))
1155 memset ((void *) old_obj->low,
1157 (size_t) (old_obj->high - old_obj->low + 1));
1160 /* Manage the object cemetary. */
1161 if (__mf_opts.persistent_count > 0 &&
1162 old_obj->type >= 0 &&
1163 old_obj->type <= __MF_TYPE_MAX_CEM)
/* Record deallocation metadata so later violations can describe
   this (now dead) object.  */
1165 old_obj->deallocated_p = 1;
1166 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1167 #if HAVE_GETTIMEOFDAY
1168 if (__mf_opts.timestamps)
1169 gettimeofday (& old_obj->dealloc_time, NULL);
1170 #endif
1171 #ifdef LIBMUDFLAPTH
1172 old_obj->dealloc_thread = pthread_self ();
1173 #endif
1175 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1176 old_obj->dealloc_backtrace_size =
1177 __mf_backtrace (& old_obj->dealloc_backtrace,
1178 NULL, 2);
1180 /* Encourage this object to be displayed again in current epoch. */
1181 old_obj->description_epoch --;
1183 /* Put this object into the cemetary. This may require this plot to
1184 be recycled, and the previous resident to be designated del_obj. */
1186 unsigned row = old_obj->type;
1187 unsigned plot = __mf_object_dead_head [row];
1189 del_obj = __mf_object_cemetary [row][plot];
1190 __mf_object_cemetary [row][plot] = old_obj;
/* Advance the circular-buffer head, wrapping at persistent_count.  */
1191 plot ++;
1192 if (plot == __mf_opts.persistent_count) plot = 0;
1193 __mf_object_dead_head [row] = plot;
1196 else
1197 del_obj = old_obj;
1199 if (__mf_opts.print_leaks)
1201 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1202 (old_obj->type == __MF_TYPE_HEAP
1203 || old_obj->type == __MF_TYPE_HEAP_I))
1205 fprintf (stderr,
1206 "*******\n"
1207 "mudflap warning: unaccessed registered object:\n");
1208 __mf_describe_object (old_obj);
1212 if (del_obj != NULL) /* May or may not equal old_obj. */
/* Release auxiliary backtrace buffers before the object itself.  */
1214 if (__mf_opts.backtrace > 0)
1216 CALL_REAL(free, del_obj->alloc_backtrace);
1217 if (__mf_opts.persistent_count > 0)
1219 CALL_REAL(free, del_obj->dealloc_backtrace);
1222 CALL_REAL(free, del_obj);
1225 break;
1227 } /* end switch (__mf_opts.mudflap_mode) */
1230 if (__mf_opts.collect_stats)
1232 __mf_count_unregister ++;
1233 __mf_total_unregister_size += sz;
/* Accumulator filled in by __mf_adapt_cache_fn while walking the
   object trees; consumed by __mf_adapt_cache to retune the lookup
   cache parameters.  */
1239 struct tree_stats
/* Number of objects that have been read or written at least once.  */
1241 unsigned obj_count;
/* Sum of the byte sizes of those accessed objects.  */
1242 unsigned long total_size;
/* Subset of obj_count with a nonzero liveness counter.  */
1243 unsigned live_obj_count;
/* Sum of liveness values over live objects.  */
1244 double total_weight;
/* Liveness-weighted sum of object byte sizes.  */
1245 double weighted_size;
/* Per address-bit position: liveness weight with that bit clear
   ([i][0]) vs. set ([i][1]), taken from each object's low address.  */
1246 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1251 static int
1252 __mf_adapt_cache_fn (splay_tree_node n, void *param)
1254 __mf_object_t *obj = (__mf_object_t *) n->value;
1255 struct tree_stats *s = (struct tree_stats *) param;
1257 assert (obj != NULL && s != NULL);
1259 /* Exclude never-accessed objects. */
1260 if (obj->read_count + obj->write_count)
1262 s->obj_count ++;
1263 s->total_size += (obj->high - obj->low + 1);
1265 if (obj->liveness)
1267 unsigned i;
1268 uintptr_t addr;
1270 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1271 (void *) obj->low, obj->liveness, obj->name); */
1273 s->live_obj_count ++;
1274 s->total_weight += (double) obj->liveness;
1275 s->weighted_size +=
1276 (double) (obj->high - obj->low + 1) *
1277 (double) obj->liveness;
1279 addr = obj->low;
1280 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1282 unsigned bit = addr & 1;
1283 s->weighted_address_bits[i][bit] += obj->liveness;
1284 addr = addr >> 1;
1287 /* Age the liveness value. */
1288 obj->liveness >>= 1;
1292 return 0;
1296 static void
1297 __mf_adapt_cache ()
1299 struct tree_stats s;
1300 uintptr_t new_mask = 0;
1301 unsigned char new_shift;
1302 float cache_utilization;
1303 float max_value;
1304 static float smoothed_new_shift = -1.0;
1305 unsigned i;
1307 memset (&s, 0, sizeof (s));
1309 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1310 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1311 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1312 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1313 splay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1315 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1316 empty tree. Just leave the cache alone in such cases, rather
1317 than risk dying by division-by-zero. */
1318 if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1319 return;
1321 /* Guess a good value for the shift parameter by finding an address bit that is a
1322 good discriminant of lively objects. */
1323 max_value = 0.0;
1324 for (i=0; i<sizeof (uintptr_t)*8; i++)
1326 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1327 if (max_value < value) max_value = value;
1329 for (i=0; i<sizeof (uintptr_t)*8; i++)
1331 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1332 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1333 if (value >= max_value * shoulder_factor)
1334 break;
1336 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1337 /* Converge toward this slowly to reduce flapping. */
1338 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1339 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1340 assert (new_shift < sizeof (uintptr_t)*8);
1342 /* Count number of used buckets. */
1343 cache_utilization = 0.0;
1344 for (i = 0; i < (1 + __mf_lc_mask); i++)
1345 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1346 cache_utilization += 1.0;
1347 cache_utilization /= (1 + __mf_lc_mask);
1349 new_mask |= 0x3ff; /* XXX: force a large cache. */
1350 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1352 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1353 "util=%u%% m=%p s=%u\n",
1354 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1355 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1357 /* We should reinitialize cache if its parameters have changed. */
1358 if (new_mask != __mf_lc_mask ||
1359 new_shift != __mf_lc_shift)
1361 __mf_lc_mask = new_mask;
1362 __mf_lc_shift = new_shift;
1363 /* XXX: race */
1364 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1365 /* void slot 0 */
1366 __mf_lookup_cache[0].low = MAXPTR;
1372 /* __mf_find_object[s] */
1374 /* Find overlapping live objecs between [low,high]. Return up to
1375 max_objs of their pointers in objs[]. Return total count of
1376 overlaps (may exceed max_objs). */
1378 unsigned
1379 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1380 __mf_object_t **objs, unsigned max_objs, int type)
1382 unsigned count = 0;
1383 splay_tree t = __mf_object_tree (type);
1384 splay_tree_key k = (splay_tree_key) ptr_low;
1385 int direction;
1387 splay_tree_node n = splay_tree_lookup (t, k);
1388 /* An exact match for base address implies a hit. */
1389 if (n != NULL)
1391 if (count < max_objs)
1392 objs[count] = (__mf_object_t *) n->value;
1393 count ++;
1396 /* Iterate left then right near this key value to find all overlapping objects. */
1397 for (direction = 0; direction < 2; direction ++)
1399 /* Reset search origin. */
1400 k = (splay_tree_key) ptr_low;
1402 while (1)
1404 __mf_object_t *obj;
1406 n = (direction == 0 ? splay_tree_successor (t, k) : splay_tree_predecessor (t, k));
1407 if (n == NULL) break;
1408 obj = (__mf_object_t *) n->value;
1410 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1411 break;
1413 if (count < max_objs)
1414 objs[count] = (__mf_object_t *) n->value;
1415 count ++;
1417 k = (splay_tree_key) obj->low;
1421 return count;
1425 unsigned
1426 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1427 __mf_object_t **objs, unsigned max_objs)
1429 int type;
1430 unsigned count = 0;
1432 /* Search each splay tree for overlaps. */
1433 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1435 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1436 if (c > max_objs)
1438 max_objs = 0;
1439 objs = NULL;
1441 else /* NB: C may equal 0 */
1443 max_objs -= c;
1444 objs += c;
1446 count += c;
1449 return count;
1454 /* __mf_link_object */
1456 static void
1457 __mf_link_object (__mf_object_t *node)
1459 splay_tree t = __mf_object_tree (node->type);
1460 splay_tree_insert (t, (splay_tree_key) node->low, (splay_tree_value) node);
1463 /* __mf_unlink_object */
1465 static void
1466 __mf_unlink_object (__mf_object_t *node)
1468 splay_tree t = __mf_object_tree (node->type);
1469 splay_tree_remove (t, (splay_tree_key) node->low);
1472 /* __mf_find_dead_objects */
1474 /* Find overlapping dead objecs between [low,high]. Return up to
1475 max_objs of their pointers in objs[]. Return total count of
1476 overlaps (may exceed max_objs). */
1478 static unsigned
1479 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1480 __mf_object_t **objs, unsigned max_objs)
1482 if (__mf_opts.persistent_count > 0)
1484 unsigned count = 0;
1485 unsigned recollection = 0;
1486 unsigned row = 0;
1488 assert (low <= high);
1489 assert (max_objs == 0 || objs != NULL);
1491 /* Widen the search from the most recent plots in each row, looking
1492 backward in time. */
1493 recollection = 0;
1494 while (recollection < __mf_opts.persistent_count)
1496 count = 0;
1498 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1500 unsigned plot;
1501 unsigned i;
1503 plot = __mf_object_dead_head [row];
1504 for (i = 0; i <= recollection; i ++)
1506 __mf_object_t *obj;
1508 /* Look backward through row: it's a circular buffer. */
1509 if (plot > 0) plot --;
1510 else plot = __mf_opts.persistent_count - 1;
1512 obj = __mf_object_cemetary [row][plot];
1513 if (obj && obj->low <= high && obj->high >= low)
1515 /* Found an overlapping dead object! */
1516 if (count < max_objs)
1517 objs [count] = obj;
1518 count ++;
1523 if (count)
1524 break;
1526 /* Look farther back in time. */
1527 recollection = (recollection * 2) + 1;
1530 return count;
1531 } else {
1532 return 0;
1536 /* __mf_describe_object */
/* Print a human-readable description of OBJ to stderr: bounds, type,
   access counts, allocation (and, when persistence is on, deallocation)
   provenance.  Calling with OBJ == NULL does not print anything; it
   bumps the static epoch counter so previously-described objects will
   be described in full again.  */
1538 static void
1539 __mf_describe_object (__mf_object_t *obj)
1541 static unsigned epoch = 0;
1542 if (obj == NULL)
1544 epoch ++;
1545 return;
/* In abbreviate mode, an object already described this epoch gets only
   a one-line reminder.  */
1548 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1550 fprintf (stderr,
1551 "mudflap object %p: name=`%s'\n",
1552 (void *) obj, (obj->name ? obj->name : ""));
1553 return;
1555 else
1556 obj->description_epoch = epoch;
1558 fprintf (stderr,
1559 "mudflap object %p: name=`%s'\n"
1560 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1561 "alloc time=%lu.%06lu pc=%p"
1562 #ifdef LIBMUDFLAPTH
1563 " thread=%u"
1564 #endif
1565 "\n",
1566 (void *) obj, (obj->name ? obj->name : ""),
1567 (void *) obj->low, (void *) obj->high,
1568 (unsigned long) (obj->high - obj->low + 1),
1569 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1570 obj->type == __MF_TYPE_HEAP ? "heap" :
1571 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1572 obj->type == __MF_TYPE_STACK ? "stack" :
1573 obj->type == __MF_TYPE_STATIC ? "static" :
1574 obj->type == __MF_TYPE_GUESS ? "guess" :
1575 "unknown"),
1576 obj->read_count, obj->write_count, obj->liveness,
1577 obj->watching_p ? " watching" : "",
1578 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1579 (void *) obj->alloc_pc
1580 #ifdef LIBMUDFLAPTH
1581 , (unsigned) obj->alloc_thread
1582 #endif
/* Allocation backtrace, if one was captured at registration time.  */
1585 if (__mf_opts.backtrace > 0)
1587 unsigned i;
1588 for (i=0; i<obj->alloc_backtrace_size; i++)
1589 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
/* Deallocation details exist only for cemetery (persistent) objects.  */
1592 if (__mf_opts.persistent_count > 0)
1594 if (obj->deallocated_p)
1596 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1597 #ifdef LIBMUDFLAPTH
1598 " thread=%u"
1599 #endif
1600 "\n",
1601 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1602 (void *) obj->dealloc_pc
1603 #ifdef LIBMUDFLAPTH
1604 , (unsigned) obj->dealloc_thread
1605 #endif
1609 if (__mf_opts.backtrace > 0)
1611 unsigned i;
1612 for (i=0; i<obj->dealloc_backtrace_size; i++)
1613 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1620 static int
1621 __mf_report_leaks_fn (splay_tree_node n, void *param)
1623 __mf_object_t *node = (__mf_object_t *) n->value;
1624 unsigned *count = (unsigned *) param;
1626 if (count != NULL)
1627 (*count) ++;
1629 fprintf (stderr, "Leaked object %u:\n", (*count));
1630 __mf_describe_object (node);
1632 return 0;
1636 static unsigned
1637 __mf_report_leaks ()
1639 unsigned count = 0;
1641 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1642 __mf_report_leaks_fn, & count);
1643 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1644 __mf_report_leaks_fn, & count);
1646 return count;
1649 /* ------------------------------------------------------------------------ */
1650 /* __mf_report */
/* Public entry point: take the mudflap lock and recursion guard, then
   delegate to the unlocked worker __mfu_report.  The macro ordering
   here (lock, protect, work, unprotect, unlock) must be preserved.  */
1652 void
1653 __mf_report ()
1655 LOCKTH ();
1656 BEGIN_RECURSION_PROTECT ();
1657 __mfu_report ();
1658 END_RECURSION_PROTECT ();
1659 UNLOCKTH ();
/* Unlocked worker for __mf_report: dump accumulated statistics,
   lookup-cache utilization, live/zombie object counts, and (in check
   mode with print_leaks) a leak listing to stderr.  Presumably called
   with the lock and recursion protection held -- see __mf_report and
   the assert in __mf_sigusr1_respond.  */
1662 void
1663 __mfu_report ()
1665 if (__mf_opts.collect_stats)
1667 fprintf (stderr,
1668 "*******\n"
1669 "mudflap stats:\n"
1670 "calls to __mf_check: %lu\n"
1671 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1672 " __mf_unregister: %lu [%luB]\n"
1673 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1674 __mf_count_check,
1675 __mf_count_register,
1676 __mf_total_register_size[0], __mf_total_register_size[1],
1677 __mf_total_register_size[2], __mf_total_register_size[3],
1678 __mf_total_register_size[4], /* XXX */
1679 __mf_count_unregister, __mf_total_unregister_size,
1680 __mf_count_violation[0], __mf_count_violation[1],
1681 __mf_count_violation[2], __mf_count_violation[3],
1682 __mf_count_violation[4]);
1684 fprintf (stderr,
1685 "calls with reentrancy: %lu\n", __mf_reentrancy);
1686 #ifdef LIBMUDFLAPTH
1687 fprintf (stderr,
1688 " lock contention: %lu\n", __mf_lock_contention);
1689 #endif
1691 /* Lookup cache stats. */
1693 unsigned i;
1694 unsigned max_reuse = 0;
1695 unsigned num_used = 0;
1696 unsigned num_unused = 0;
1698 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1700 if (__mf_lookup_cache_reusecount[i])
1701 num_used ++;
1702 else
1703 num_unused ++;
1704 if (max_reuse < __mf_lookup_cache_reusecount[i])
1705 max_reuse = __mf_lookup_cache_reusecount[i];
1707 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1708 num_used, num_unused, max_reuse);
/* Count live objects by querying the whole address space.  */
1712 unsigned live_count;
1713 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1714 fprintf (stderr, "number of live objects: %u\n", live_count);
/* Count occupied cemetery plots ("zombies").  */
1717 if (__mf_opts.persistent_count > 0)
1719 unsigned dead_count = 0;
1720 unsigned row, plot;
1721 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1722 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1723 if (__mf_object_cemetary [row][plot] != 0)
1724 dead_count ++;
1725 fprintf (stderr, " zombie objects: %u\n", dead_count);
1728 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1730 unsigned l;
1731 extern void * __mf_wrap_alloca_indirect (size_t c);
1733 /* Free up any remaining alloca()'d blocks. */
1734 __mf_wrap_alloca_indirect (0);
1735 __mf_describe_object (NULL); /* Reset description epoch. */
1736 l = __mf_report_leaks ();
1737 fprintf (stderr, "number of leaked objects: %u\n", l);
1741 /* __mf_backtrace */
/* Capture a stack trace as an array of strings.  On success *SYMBOLS
   points at a single heap block (free with the real free) holding
   remaining_size char* entries; the return value is that count.
   GUESS_PC, when found in the raw trace, marks where trimming starts;
   otherwise the first GUESS_OMIT_LEVELS frames are dropped.  */
1743 size_t
1744 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1746 void ** pc_array;
1747 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1748 unsigned remaining_size;
1749 unsigned omitted_size = 0;
1750 unsigned i;
1751 DECLARE (void, free, void *ptr);
1752 DECLARE (void *, calloc, size_t c, size_t n);
1753 DECLARE (void *, malloc, size_t n);
/* Use the real allocator so the trace buffer is not itself tracked.  */
1755 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1756 #ifdef HAVE_BACKTRACE
1757 pc_array_size = backtrace (pc_array, pc_array_size);
1758 #else
1759 #define FETCH(n) do { if (pc_array_size >= n) { \
1760 pc_array[n] = __builtin_return_address(n); \
1761 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1763 /* Unroll some calls __builtin_return_address because this function
1764 only takes a literal integer parameter. */
1765 FETCH (0);
1766 #if 0
1767 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1768 rather than simply returning 0. :-( */
1769 FETCH (1);
1770 FETCH (2);
1771 FETCH (3);
1772 FETCH (4);
1773 FETCH (5);
1774 FETCH (6);
1775 FETCH (7);
1776 FETCH (8);
1777 if (pc_array_size > 8) pc_array_size = 9;
1778 #else
1779 if (pc_array_size > 0) pc_array_size = 1;
1780 #endif
1782 #undef FETCH
1783 #endif
1785 /* We want to trim the first few levels of the stack traceback,
1786 since they contain libmudflap wrappers and junk. If pc_array[]
1787 ends up containing a non-NULL guess_pc, then trim everything
1788 before that. Otherwise, omit the first guess_omit_levels
1789 entries. */
/* NOTE(review): no break here, so the LAST match of guess_pc wins.  */
1791 if (guess_pc != NULL)
1792 for (i=0; i<pc_array_size; i++)
1793 if (pc_array [i] == guess_pc)
1794 omitted_size = i;
1796 if (omitted_size == 0) /* No match? */
1797 if (pc_array_size > guess_omit_levels)
1798 omitted_size = guess_omit_levels;
1800 remaining_size = pc_array_size - omitted_size;
1802 #ifdef HAVE_BACKTRACE_SYMBOLS
1803 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1804 #else
1806 /* Let's construct a buffer by hand. It will have <remaining_size>
1807 char*'s at the front, pointing at individual strings immediately
1808 afterwards. */
1809 void *buffer;
1810 char *chars;
1811 char **pointers;
1812 enum { perline = 30 };
1813 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1814 pointers = (char **) buffer;
1815 chars = (char *)buffer + (remaining_size * sizeof (char *));
1816 for (i = 0; i < remaining_size; i++)
1818 pointers[i] = chars;
/* Each formatted address occupies a fixed perline-byte slot.  */
1819 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1820 chars = chars + perline;
1822 *symbols = pointers;
1824 #endif
1825 CALL_REAL (free, pc_array);
1827 return remaining_size;
1830 /* ------------------------------------------------------------------------ */
1831 /* __mf_violation */
/* Report an access violation of the given TYPE at [PTR, PTR+SZ)
   (PC = faulting program counter, LOCATION = optional source string),
   describe nearby live and dead objects, then act according to
   __mf_opts.violation_mode (ignore, SIGSEGV, abort, or attach gdb).  */
1833 void
1834 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1835 const char *location, int type)
1837 char buf [128];
1838 static unsigned violation_number;
1839 DECLARE(void, free, void *ptr);
1841 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1842 (void *) pc,
1843 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
/* Clamp out-of-range TYPE values into slot 0 of the counters.  */
1845 if (__mf_opts.collect_stats)
1846 __mf_count_violation [(type < 0) ? 0 :
1847 (type > __MF_VIOL_WATCH) ? 0 :
1848 type] ++;
1850 /* Print out a basic warning message. */
1851 if (__mf_opts.verbose_violations)
1853 unsigned dead_p;
1854 unsigned num_helpful = 0;
1855 struct timeval now = { 0, 0 };
1856 #if HAVE_GETTIMEOFDAY
1857 gettimeofday (& now, NULL);
1858 #endif
1860 violation_number ++;
1861 fprintf (stderr,
1862 "*******\n"
1863 "mudflap violation %u (%s): time=%lu.%06lu "
1864 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1865 violation_number,
1866 ((type == __MF_VIOL_READ) ? "check/read" :
1867 (type == __MF_VIOL_WRITE) ? "check/write" :
1868 (type == __MF_VIOL_REGISTER) ? "register" :
1869 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1870 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1871 now.tv_sec, now.tv_usec,
1872 (void *) ptr, (unsigned long)sz, (void *) pc,
1873 (location != NULL ? " location=`" : ""),
1874 (location != NULL ? location : ""),
1875 (location != NULL ? "'" : ""));
1877 if (__mf_opts.backtrace > 0)
1879 char ** symbols;
1880 unsigned i, num;
1882 num = __mf_backtrace (& symbols, (void *) pc, 2);
1883 /* Note: backtrace_symbols calls malloc(). But since we're in
1884 __mf_violation and presumably __mf_check, it'll detect
1885 recursion, and not put the new string into the database. */
1887 for (i=0; i<num; i++)
1888 fprintf (stderr, " %s\n", symbols[i]);
1890 /* Calling free() here would trigger a violation. */
1891 CALL_REAL(free, symbols);
1895 /* Look for nearby objects. For this, we start with s_low/s_high
1896 pointing to the given area, looking for overlapping objects.
1897 If none show up, widen the search area and keep looking. */
1899 if (sz == 0) sz = 1;
/* First pass searches live objects, second pass the cemetery.  */
1901 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
1903 enum {max_objs = 3}; /* magic */
1904 __mf_object_t *objs[max_objs];
1905 unsigned num_objs = 0;
1906 uintptr_t s_low, s_high;
1907 unsigned tries = 0;
1908 unsigned i;
1910 s_low = (uintptr_t) ptr;
1911 s_high = CLAMPSZ (ptr, sz);
1913 while (tries < 16) /* magic */
1915 if (dead_p)
1916 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
1917 else
1918 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
1920 if (num_objs) /* good enough */
1921 break;
1923 tries ++;
1925 /* XXX: tune this search strategy. It's too dependent on
1926 sz, which can vary from 1 to very big (when array index
1927 checking) numbers. */
1928 s_low = CLAMPSUB (s_low, (sz * tries * tries));
1929 s_high = CLAMPADD (s_high, (sz * tries * tries));
/* For each found object, describe where the checked region sits
   relative to it (before / after / into, at both endpoints).  */
1932 for (i = 0; i < min (num_objs, max_objs); i++)
1934 __mf_object_t *obj = objs[i];
1935 uintptr_t low = (uintptr_t) ptr;
1936 uintptr_t high = CLAMPSZ (ptr, sz);
1937 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
1938 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
1939 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
1940 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
1941 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
1942 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
1944 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
1945 num_helpful + i + 1,
1946 (before1 ? before1 : after1 ? after1 : into1),
1947 (before1 ? "before" : after1 ? "after" : "into"),
1948 (before2 ? before2 : after2 ? after2 : into2),
1949 (before2 ? "before" : after2 ? "after" : "into"));
1950 __mf_describe_object (obj);
1952 num_helpful += num_objs;
1955 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
1958 /* How to finally handle this violation? */
1959 switch (__mf_opts.violation_mode)
1961 case viol_nop:
1962 break;
1963 case viol_segv:
1964 kill (getpid(), SIGSEGV);
1965 break;
1966 case viol_abort:
1967 abort ();
1968 break;
1969 case viol_gdb:
1971 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
1972 system (buf);
1973 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
1974 instead, and let the forked child execlp() gdb. That way, this
1975 subject process can be resumed under the supervision of gdb.
1976 This can't happen now, since system() only returns when gdb
1977 dies. In that case, we need to beware of starting a second
1978 concurrent gdb child upon the next violation. (But if the first
1979 gdb dies, then starting a new one is appropriate.) */
1980 break;
1984 /* ------------------------------------------------------------------------ */
/* Public entry point: start watching the region [ptr, ptr+sz).
   Returns the number of objects whose watch state changed.  The macro
   ordering (lock, recursion-protect, work, unprotect, unlock) must be
   preserved.  */
1987 unsigned __mf_watch (void *ptr, size_t sz)
1989 unsigned rc;
1990 LOCKTH ();
1991 BEGIN_RECURSION_PROTECT ();
1992 rc = __mf_watch_or_not (ptr, sz, 1);
1993 END_RECURSION_PROTECT ();
1994 UNLOCKTH ();
1995 return rc;
1998 unsigned __mf_unwatch (void *ptr, size_t sz)
2000 unsigned rc;
2001 LOCKTH ();
2002 rc = __mf_watch_or_not (ptr, sz, 0);
2003 UNLOCKTH ();
2004 return rc;
2008 static unsigned
2009 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2011 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2012 uintptr_t ptr_low = (uintptr_t) ptr;
2013 unsigned count = 0;
2015 TRACE ("%s ptr=%p size=%lu\n",
2016 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2018 switch (__mf_opts.mudflap_mode)
2020 case mode_nop:
2021 case mode_populate:
2022 case mode_violate:
2023 count = 0;
2024 break;
2026 case mode_check:
2028 __mf_object_t **all_ovr_objs;
2029 unsigned obj_count;
2030 unsigned n;
2031 DECLARE (void *, malloc, size_t c);
2032 DECLARE (void, free, void *p);
2034 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2035 VERBOSE_TRACE (" %u:", obj_count);
2037 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2038 if (all_ovr_objs == NULL) abort ();
2039 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2040 assert (n == obj_count);
2042 for (n = 0; n < obj_count; n ++)
2044 __mf_object_t *obj = all_ovr_objs[n];
2046 VERBOSE_TRACE (" [%p]", (void *) obj);
2047 if (obj->watching_p != flag)
2049 obj->watching_p = flag;
2050 count ++;
2052 /* Remove object from cache, to ensure next access
2053 goes through __mf_check(). */
2054 if (flag)
2055 __mf_uncache_object (obj);
2058 CALL_REAL (free, all_ovr_objs);
2060 break;
2063 return count;
/* SIGUSR1 handler: just count the signal.  The actual reporting work
   is deferred to __mf_sigusr1_respond, outside signal context.  */
2067 void
2068 __mf_sigusr1_handler (int num)
2070 __mf_sigusr1_received ++;
2073 /* Install or remove SIGUSR1 handler as necessary.
2074 Also, respond to a received pending SIGUSR1. */
2075 void
2076 __mf_sigusr1_respond ()
2078 static int handler_installed;
2080 #ifdef SIGUSR1
2081 /* Manage handler */
2082 if (__mf_opts.sigusr1_report && ! handler_installed)
2084 signal (SIGUSR1, __mf_sigusr1_handler);
2085 handler_installed = 1;
2087 else if(! __mf_opts.sigusr1_report && handler_installed)
2089 signal (SIGUSR1, SIG_DFL);
2090 handler_installed = 0;
2092 #endif
2094 /* Manage enqueued signals */
2095 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2097 __mf_sigusr1_handled ++;
2098 assert (__mf_state == reentrant);
2099 __mfu_report ();
2100 handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2105 /* XXX: provide an alternative __assert_fail function that cannot
2106 fail due to libmudflap infinite recursion. */
2107 #ifndef NDEBUG
/* Write the decimal representation of N to file descriptor FD using
   raw write() rather than stdio, so it is usable from the failing
   assertion path below.  */
static void
write_itoa (int fd, unsigned n)
{
  /* sizeof(n)*4 characters comfortably hold every decimal digit of an
     unsigned plus the trailing NUL.  */
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  char *digits = & buf [bufsize - 1];

  /* Emit digits least-significant first, filling the buffer from the
     back toward the front.  */
  *digits = '\0';
  do
    {
      digits --;
      *digits = (char) ('0' + (n % 10));
      n /= 10;
    }
  while (n != 0 && digits > buf);

  write (fd, digits, strlen (digits));
}
/* Replacement for the C library's __assert_fail: report the failed
   assertion using only raw write()/abort(), avoiding stdio and malloc
   which could recurse back into libmudflap (see the comment above).  */
2132 void
2133 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2135 #define write2(string) write (2, (string), strlen ((string)));
2136 write2("mf");
2137 #ifdef LIBMUDFLAPTH
/* In the threaded library, tag the message with the thread id.  */
2138 write2("(");
2139 write_itoa (2, (unsigned) pthread_self ());
2140 write2(")");
2141 #endif
2142 write2(": assertion failure: `");
2143 write (2, msg, strlen (msg));
2144 write2("' in ");
2145 write (2, func, strlen (func));
2146 write2(" at ");
2147 write (2, file, strlen (file));
2148 write2(":");
2149 write_itoa (2, line);
2150 write2("\n");
2151 #undef write2
2152 abort ();
2156 #endif
2162 /* #include the generic splay tree implementation from libiberty here, to
2163 ensure that it uses our memory allocation primitives. */
/* Free P via the real (unwrapped) libc free, so the splay tree's own
   bookkeeping memory never re-enters the mudflap allocation hooks.  */
2165 static void
2166 splay_tree_free (void *p)
2168 DECLARE (void, free, void *p);
2169 CALL_REAL (free, p);
/* Allocate S bytes via the real (unwrapped) libc malloc, for the same
   reason as splay_tree_free above.  NOTE(review): unlike a typical
   xmalloc, this does not abort on NULL -- the caller must cope.  */
2172 static void *
2173 splay_tree_xmalloc (size_t s)
2175 DECLARE (void *, malloc, size_t s);
2176 return CALL_REAL (malloc, s);
2179 #define free(z) splay_tree_free(z)
2180 #define xmalloc(z) splay_tree_xmalloc(z)
2181 #include "splay-tree.c"