2004-07-14 Eric Christopher <echristo@redhat.com>
libmudflap/mf-runtime.c
/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
#include "splay-tree.h"
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant; \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
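
/* Editor's sketch (not part of the original source): each public entry
   point brackets its unlocked "__mfu_" worker with these two macros plus
   the big lock, e.g.:

       void __mf_check (void *ptr, size_t sz, int type, const char *location)
       {
         LOCKTH ();
         BEGIN_RECURSION_PROTECT ();
         __mfu_check (ptr, sz, type, location);
         END_RECURSION_PROTECT ();
         UNLOCKTH ();
       }

   so a mudflap hook re-entered from inside the runtime aborts loudly
   instead of recursing through the allocator.  */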
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
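
/* Editor's sketch (not part of the original source): __MF_CACHE_INDEX
   (from mf-impl.h) hashes a pointer into this direct-mapped cache,
   conceptually

       idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;

   and the compiler-inserted inline check amounts roughly to

       struct __mf_cache *c = & __mf_lookup_cache [idx];
       if (UNLIKELY (c->low > (uintptr_t) ptr
                     || c->high < (uintptr_t) ptr + sz - 1))
         __mf_check (ptr, sz, __MF_CHECK_READ, "file:line (expr)");

   so a hit against a previously validated object bypasses the runtime
   entirely.  */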
struct __mf_options __mf_opts;

int __mf_starting_p = 1;
#ifndef LIBMUDFLAPTH
enum __mf_state_enum __mf_state = active;
#else
/* See __mf_state_perthread() in mf-hooks.c. */
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below. */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
const void *threads_active_p = (void *) pthread_join;
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

static void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static splay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  int value;
  int *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (threads_active_p ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        else
          optstr++;
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
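
/* Illustrative usage (editor's note, not original source):

       rc = __mf_set_options ("-mode-check -viol-abort -backtrace=8");

   has the same effect as running the program with
   MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8' in the
   environment; a negative return means a parse failure (or an explicit
   -help), after which callers such as __mf_init print __mf_usage() and
   exit.  */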
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
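
/* Editor's sketch (not part of the original source): the DECLARE and
   CALL_REAL macros from mf-impl.h route allocation calls made by the
   runtime itself through this table, conceptually:

       DECLARE (void *, malloc, size_t sz);     declare the real signature
       void *p = CALL_REAL (malloc, 100);       dispatch via the dlsym'd
                                                __mf_dynamic entry in PIC
                                                builds

   which keeps libmudflap's own allocations from recursing into the
   wrapped hooks.  */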
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static splay_tree
__mf_object_tree (int type)
{
  static splay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = splay_tree_new ();
  return trees[type];
}
void
__mf_init ()
{
  char *ov = 0;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
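
/* Illustrative usage (editor's note, not original source; the names here
   are hypothetical): application code can drive the same public hooks
   that __mf_init uses above, e.g.:

       char *buf = malloc (16);
       __mf_register (buf, 16, __MF_TYPE_HEAP, "example buf");
       __mf_check (buf + 8, 4, __MF_CHECK_WRITE, "example.c:3 (buf)");
       __mf_unregister (buf, 16, __MF_TYPE_HEAP);

   registering an object, validating an access against it, and retiring
   it again.  */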
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      entry->low = MINPTR;
      entry->high = MAXPTR;
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad. */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad. */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans one or more valid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }

                  /* XXX: Access runs off left or right side of this
                     object.  That could be okay, if there are
                     other objects that fill in all the holes. */
                }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgement is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
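
/* Worked example (editor's note, not original source): with objects
   A=[0x1000,0x10ff] and B=[0x1100,0x11ff] registered, a 16-byte check at
   0x1080 is entirely contained in A, so judgement becomes 1 and the
   cache entry adopts A's bounds; a 16-byte check at 0x10f8 overlaps both
   A and B but is contained in neither, so judgement stays 0 and the loop
   above defers to __mf_heuristic_check before finally declaring a
   violation.  */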
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      unsigned idx_low = __MF_CACHE_INDEX (low);
      unsigned idx_high = __MF_CACHE_INDEX (high);
      unsigned i;
      for (i = idx_low; i <= idx_high; i++)
        {
          struct __mf_cache *entry = & __mf_lookup_cache [i];
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here.  */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0 &&
            old_obj->type >= 0 &&
            old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
static int
__mf_adapt_cache_fn (splay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0
         && s.live_obj_count > 0
         && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0x3ff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  splay_tree t = __mf_object_tree (type);
  splay_tree_key k = (splay_tree_key) ptr_low;
  int direction;

  splay_tree_node n = splay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (splay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? splay_tree_successor (t, k) : splay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (splay_tree_key) obj->low;
        }
    }

  return count;
}
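
/* Illustrative usage (editor's note, not original source): because the
   returned count may exceed max_objs, callers that need every overlap
   use a count-then-fill pattern, as __mf_watch_or_not below does:

       obj_count = __mf_find_objects (lo, hi, NULL, 0);    count only
       all = CALL_REAL (malloc, obj_count * sizeof (__mf_object_t *));
       n = __mf_find_objects (lo, hi, all, obj_count);     fill the array
       assert (n == obj_count);
*/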
unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: c may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  splay_tree t = __mf_object_tree (node->type);
  splay_tree_insert (t, (splay_tree_key) node->low, (splay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  splay_tree t = __mf_object_tree (node->type);
  splay_tree_remove (t, (splay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    {
      return 0;
    }
}
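
/* Editor's note (not original source): the widening schedule visits
   recollection = 0, 1, 3, 7, 15, ..., so with the default
   persistent_count of 100 the loop makes at most seven progressively
   deeper sweeps over each cemetery row before giving up.  */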
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap object %p: name=`%s'\n",
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap object %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (splay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                             __mf_report_leaks_fn, & count);
  (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                             __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "      lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }

        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "       zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now;
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:
      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
/* ------------------------------------------------------------------------ */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  UNLOCKTH ();
  return rc;
}
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
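
/* Illustrative usage (editor's note, not original source):

       static int x[10];
       __mf_register (x, sizeof (x), __MF_TYPE_STATIC, "x");
       __mf_watch (x, sizeof (x));      the next checked access to x then
                                        raises a __MF_VIOL_WATCH violation
       __mf_unwatch (x, sizeof (x));    clears the flag again
*/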
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_state == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
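
/* Editor's note (not original source): write_itoa (2, 1234) emits the
   four bytes "1234" using only write(), so the replacement __assert_fail
   below stays async-signal-safe and never re-enters the (possibly
   corrupted) allocator.  */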
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif
/* #include the generic splay tree implementation from libiberty here, to
   ensure that it uses our memory allocation primitives.  */

static void
splay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
splay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}

#define free(z) splay_tree_free(z)
#define xmalloc(z) splay_tree_xmalloc(z)
#include "splay-tree.c"