/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008
   Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
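
/* An illustrative sketch (not compiled): how these entry points are
   meant to combine.  Keys are object base addresses and values are
   __mf_object_t pointers, as __mf_link_object and __mf_find_objects2
   below demonstrate for real.  The example key/value here are
   hypothetical.  */
#if 0
static void
mfsplay_tree_example (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  int payload = 42;

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, (mfsplay_tree_value) &payload);

  /* Exact-match lookup; a splay tree moves the found node to the root.  */
  mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
  if (n != NULL)
    assert (n->value == (mfsplay_tree_value) &payload);

  /* Nearest-neighbor queries support range scans without an iterator.  */
  n = mfsplay_tree_successor (t, (mfsplay_tree_key) 0x0fff);
  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);
}
#endif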
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)
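
/* Illustrative sketch (not compiled): every locked public entry point
   below follows this same bracketing discipline, so that a reentrant
   call from within the runtime is caught by begin_recursion_protect1.
   The entry-point name is hypothetical.  */
#if 0
void
__mf_example_entry_point (void)
{
  LOCKTH ();                   /* Take __mf_biglock in threaded builds.  */
  BEGIN_RECURSION_PROTECT ();  /* reentrant -> abort; else state := reentrant.  */
  /* ... call the unlocked __mfu_* worker here ... */
  END_RECURSION_PROTECT ();    /* state := active.  */
  UNLOCKTH ();
}
#endif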
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
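
/* Illustrative sketch (not compiled), assuming the cache-index macro
   from the headers reduces an address to a slot the usual way, i.e.
   roughly ((uintptr_t) ptr >> __mf_lc_shift) & __mf_lc_mask.  A check
   of [ptr, ptr+sz-1] is a cache hit when one entry already brackets
   the whole range; __mfu_check below populates entries in exactly
   this low/high form.  */
#if 0
static int
example_cache_hit_p (void *ptr, size_t sz)
{
  struct __mf_cache *elem = & __mf_lookup_cache [__MF_CACHE_INDEX (ptr)];
  return (elem->low <= (uintptr_t) ptr
          && (uintptr_t) ptr + sz - 1 <= elem->high);
}
#endif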
struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#ifdef LIBMUDFLAPTH
#ifdef HAVE_TLS
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#endif
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
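
/* Illustrative sketch (not compiled): each row of the cemetary is a
   circular buffer of up to __mf_opts.persistent_count recently
   unregistered objects.  __mf_object_dead_head[row] is the next plot
   to be overwritten, and the previous occupant is evicted for the
   caller to free.  The real code lives in __mfu_unregister and
   __mf_find_dead_objects below; this helper name is hypothetical.  */
#if 0
static __mf_object_t *
example_bury_object (unsigned row, __mf_object_t *corpse)
{
  unsigned plot = __mf_object_dead_head [row];
  __mf_object_t *evicted = __mf_object_cemetary [row][plot];
  __mf_object_cemetary [row][plot] = corpse;
  plot ++;
  if (plot == __mf_opts.persistent_count) plot = 0;
  __mf_object_dead_head [row] = plot;
  return evicted; /* Previous resident, if any; caller frees it.  */
}
#endif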
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
static struct mudoption
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
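
/* Illustrative sketch (not compiled): options are normally supplied via
   the MUDFLAP_OPTIONS environment variable, but the same strings can be
   fed to __mf_set_options at runtime.  The example strings are
   hypothetical; each token matches a table entry above, with `-no-'
   negation and `=N' suffixes handled by __mfu_set_options below.  */
#if 0
static void
example_configure_mudflap (void)
{
  __mf_set_options ("-mode-check -viol-abort");       /* set_option entries */
  __mf_set_options ("-backtrace=8 -crumple-zone=64"); /* read_integer_option */
  __mf_set_options ("-no-verbose-violations");        /* negated option */
}
#endif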
static void
__mf_usage ()
{
  struct mudoption *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2008 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct mudoption *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security.  */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled.  */
  __mf_sigusr1_respond ();

  return rc;
}
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
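
/* Illustrative sketch (not compiled): a simplified view of how the
   CALL_REAL machinery in mf-impl.h is expected to use this table.  The
   entry's dlsym(RTLD_NEXT) result is cast to the libc function's type
   and called, so the hooks can reach the real allocator underneath the
   instrumented one.  The cast-and-call and the dyn_malloc enumerator
   name are assumptions for exposition, not the exact macro.  */
#if 0
static void *
example_call_real_malloc (size_t sz)
{
  struct __mf_dynamic_entry *e = & __mf_dynamic [dyn_malloc];
  __mf_resolve_single_dynamic (e);
  return ((void * (*) (size_t)) e->pointer) (sz);
}
#endif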
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}
/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done.  */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
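
/* In the static-linking case, reaching __wrap_main relies on the GNU ld
   symbol-wrapping convention: with a link option along the lines of
   -Wl,--wrap,main (an assumption about the driver's spec, not something
   this file controls), references to main() resolve to __wrap_main, and
   __real_main resolves to the program's original main.  A minimal
   sketch of the same convention on a hypothetical symbol, not compiled:  */
#if 0
int __real_getpagesize (void);

int
__wrap_getpagesize (void) /* hypothetical wrapped symbol */
{
  /* ... instrumentation could run here before delegating ... */
  return __real_getpagesize ();
}
#endif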
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);
  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.
      */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad.  */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_higher >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }
                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }


            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
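
/* Illustrative sketch (not compiled): compiled-in checks are expected
   to consult the lookup cache inline and call __mf_check only on a
   miss, which is what makes mode_populate and the adaptive cache
   worthwhile.  The exact shape of the emitted test is assumed here for
   exposition; the helper name and location string are hypothetical.  */
#if 0
static void
example_instrumented_write (char *p, size_t sz, const char *loc)
{
  struct __mf_cache *e = & __mf_lookup_cache [__MF_CACHE_INDEX (p)];
  if (e->low > (uintptr_t) p || e->high < (uintptr_t) p + sz - 1)
    __mf_check (p, sz, __MF_CHECK_WRITE, loc); /* slow path on cache miss */
  p[0] = 0; /* the checked access itself */
}
#endif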
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry;
      unsigned i;
      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
        {
          /* For large objects (>= cache size - 1) check the whole cache.  */
          entry = & __mf_lookup_cache [0];
          for (i = 0; i <= __mf_lc_mask; i++, entry++)
            {
              /* NB: the "||" in the following test permits this code to
                 tolerate the situation introduced by __mf_check over
                 contiguous objects, where a cache entry spans several
                 objects.  */
              if (entry->low == low || entry->high == high)
                {
                  entry->low = MAXPTR;
                  entry->high = MINPTR;
                }
            }
        }
      else
        {
          /* Object is now smaller than cache size.  */
          unsigned entry_low_idx = __MF_CACHE_INDEX (low);
          unsigned entry_high_idx = __MF_CACHE_INDEX (high);
          if (entry_low_idx <= entry_high_idx)
            {
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
          else
            {
              /* Object wrapped around the end of the cache.  First search
                 from low to end of cache and then from 0 to high.  */
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
              entry = & __mf_lookup_cache [0];
              for (i = 0; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected.  */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here.  */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
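
/* Illustrative sketch (not compiled): the register/unregister pair is
   also the public interface for telling mudflap about memory it did not
   see allocated, e.g. buffers handed over from uninstrumented code.
   The helper name and buffer label are hypothetical.  */
#if 0
static void
example_adopt_foreign_buffer (void *buf, size_t len)
{
  __mf_register (buf, len, __MF_TYPE_GUESS, "foreign buffer");
  /* ... instrumented code may now access buf without violations ... */
  __mf_unregister (buf, len, __MF_TYPE_GUESS);
}
#endif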
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries.  */
#if 0
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
#endif
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}



struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;
  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping.  */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: c may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    } else {
      return 0;
    }
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );


          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries.  */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
2031 /* ------------------------------------------------------------------------ */
2032 /* __mf_violation */
2034 void
2035 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
2036 const char *location, int type)
2038 char buf [128];
2039 static unsigned violation_number;
2040 DECLARE(void, free, void *ptr);
2042 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
2043 (void *) pc,
2044 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
2046 if (__mf_opts.collect_stats)
2047 __mf_count_violation [(type < 0) ? 0 :
2048 (type > __MF_VIOL_WATCH) ? 0 :
2049 type] ++;
2051 /* Print out a basic warning message. */
2052 if (__mf_opts.verbose_violations)
2054 unsigned dead_p;
2055 unsigned num_helpful = 0;
2056 struct timeval now = { 0, 0 };
2057 #if HAVE_GETTIMEOFDAY
2058 gettimeofday (& now, NULL);
2059 #endif
2061 violation_number ++;
2062 fprintf (stderr,
2063 "*******\n"
2064 "mudflap violation %u (%s): time=%lu.%06lu "
2065 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2066 violation_number,
2067 ((type == __MF_VIOL_READ) ? "check/read" :
2068 (type == __MF_VIOL_WRITE) ? "check/write" :
2069 (type == __MF_VIOL_REGISTER) ? "register" :
2070 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2071 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2072 now.tv_sec, now.tv_usec,
2073 (void *) ptr, (unsigned long)sz, (void *) pc,
2074 (location != NULL ? " location=`" : ""),
2075 (location != NULL ? location : ""),
2076 (location != NULL ? "'" : ""));
2078 if (__mf_opts.backtrace > 0)
2080 char ** symbols;
2081 unsigned i, num;
2083 num = __mf_backtrace (& symbols, (void *) pc, 2);
2084 /* Note: backtrace_symbols calls malloc(). But since we're inside
2085 __mf_violation (and presumably arrived via __mf_check), the malloc
2086 wrapper will detect the recursion and not register the new string. */
2088 for (i=0; i<num; i++)
2089 fprintf (stderr, " %s\n", symbols[i]);
2091 /* Calling free() here would trigger a violation. */
2092 CALL_REAL(free, symbols);
2096 /* Look for nearby objects. For this, we start with s_low/s_high
2097 pointing to the given area, looking for overlapping objects.
2098 If none show up, widen the search area and keep looking. */
2100 if (sz == 0) sz = 1;
2102 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2104 enum {max_objs = 3}; /* magic */
2105 __mf_object_t *objs[max_objs];
2106 unsigned num_objs = 0;
2107 uintptr_t s_low, s_high;
2108 unsigned tries = 0;
2109 unsigned i;
2111 s_low = (uintptr_t) ptr;
2112 s_high = CLAMPSZ (ptr, sz);
2114 while (tries < 16) /* magic */
2116 if (dead_p)
2117 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2118 else
2119 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2121 if (num_objs) /* good enough */
2122 break;
2124 tries ++;
2126 /* XXX: tune this search strategy. It's too dependent on
2127 sz, which can vary from 1 to very large values (e.g. when
2128 checking array indexes). */
2129 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2130 s_high = CLAMPADD (s_high, (sz * tries * tries));
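/* Editorial note: each failed probe widens the window by sz*tries*tries
   on both sides, i.e. cumulatively by sz * (1 + 4 + 9 + ...); after all
   sixteen probes the window extends roughly 1500*sz beyond the checked
   region in each direction.  */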
2133 for (i = 0; i < min (num_objs, max_objs); i++)
2135 __mf_object_t *obj = objs[i];
2136 uintptr_t low = (uintptr_t) ptr;
2137 uintptr_t high = CLAMPSZ (ptr, sz);
2138 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2139 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2140 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2141 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2142 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2143 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2145 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2146 num_helpful + i + 1,
2147 (before1 ? before1 : after1 ? after1 : into1),
2148 (before1 ? "before" : after1 ? "after" : "into"),
2149 (before2 ? before2 : after2 ? after2 : into2),
2150 (before2 ? "before" : after2 ? "after" : "into"));
2151 __mf_describe_object (obj);
2153 num_helpful += num_objs;
2156 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2159 /* How to finally handle this violation? */
2160 switch (__mf_opts.violation_mode)
2162 case viol_nop:
2163 break;
2164 case viol_segv:
2165 kill (getpid(), SIGSEGV);
2166 break;
2167 case viol_abort:
2168 abort ();
2169 break;
2170 case viol_gdb:
2172 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2173 system (buf);
2174 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2175 instead, and let the forked child execlp() gdb. That way, this
2176 subject process can be resumed under the supervision of gdb.
2177 This can't happen now, since system() only returns when gdb
2178 dies. In that case, we need to beware of starting a second
2179 concurrent gdb child upon the next violation. (But if the first
2180 gdb dies, then starting a new one is appropriate.) */
2181 break;
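/* Editorial sketch of the fork()-based alternative described in the XXX
   note above (GDB_WAIT_PARAMETER is the hypothetical tunable it names;
   _exit(127) covers a failed execlp):

     pid_t child = fork ();
     if (child == 0)
       {
         char pidbuf[32];
         snprintf (pidbuf, sizeof pidbuf, "--pid=%u", (unsigned) getppid ());
         execlp ("gdb", "gdb", pidbuf, (char *) NULL);
         _exit (127);
       }
     else if (child > 0)
       sleep (GDB_WAIT_PARAMETER);
*/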
2185 /* ------------------------------------------------------------------------ */
2188 unsigned __mf_watch (void *ptr, size_t sz)
2190 unsigned rc;
2191 LOCKTH ();
2192 BEGIN_RECURSION_PROTECT ();
2193 rc = __mf_watch_or_not (ptr, sz, 1);
2194 END_RECURSION_PROTECT ();
2195 UNLOCKTH ();
2196 return rc;
2199 unsigned __mf_unwatch (void *ptr, size_t sz)
2201 unsigned rc;
2202 LOCKTH ();
BEGIN_RECURSION_PROTECT ();
2203 rc = __mf_watch_or_not (ptr, sz, 0);
END_RECURSION_PROTECT ();
2204 UNLOCKTH ();
2205 return rc;
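/* Example (editorial): marking a registered heap object as watched, so
   that its next access is forced through __mf_check and reported as a
   watch violation:

     int *p = (int *) malloc (16 * sizeof (int));
     unsigned changed = __mf_watch (p, 16 * sizeof (int));
     p[0] = 1;                              triggers the watch report
     __mf_unwatch (p, 16 * sizeof (int));
*/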
2209 static unsigned
2210 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2212 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2213 uintptr_t ptr_low = (uintptr_t) ptr;
2214 unsigned count = 0;
2216 TRACE ("%s ptr=%p size=%lu\n",
2217 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2219 switch (__mf_opts.mudflap_mode)
2221 case mode_nop:
2222 case mode_populate:
2223 case mode_violate:
2224 count = 0;
2225 break;
2227 case mode_check:
2229 __mf_object_t **all_ovr_objs;
2230 unsigned obj_count;
2231 unsigned n;
2232 DECLARE (void *, malloc, size_t c);
2233 DECLARE (void, free, void *p);
2235 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2236 VERBOSE_TRACE (" %u:", obj_count);
2238 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2239 if (all_ovr_objs == NULL && obj_count > 0) abort ();
2240 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2241 assert (n == obj_count);
2243 for (n = 0; n < obj_count; n ++)
2245 __mf_object_t *obj = all_ovr_objs[n];
2247 VERBOSE_TRACE (" [%p]", (void *) obj);
2248 if (obj->watching_p != flag)
2250 obj->watching_p = flag;
2251 count ++;
2253 /* Remove object from cache, to ensure next access
2254 goes through __mf_check(). */
2255 if (flag)
2256 __mf_uncache_object (obj);
2259 CALL_REAL (free, all_ovr_objs);
2261 break;
2264 return count;
2268 void
2269 __mf_sigusr1_handler (int num)
2271 __mf_sigusr1_received ++;
2274 /* Install or remove SIGUSR1 handler as necessary.
2275 Also, respond to a received pending SIGUSR1. */
2276 void
2277 __mf_sigusr1_respond ()
2279 static int handler_installed;
2281 #ifdef SIGUSR1
2282 /* Manage handler */
2283 if (__mf_opts.sigusr1_report && ! handler_installed)
2285 signal (SIGUSR1, __mf_sigusr1_handler);
2286 handler_installed = 1;
2288 else if(! __mf_opts.sigusr1_report && handler_installed)
2290 signal (SIGUSR1, SIG_DFL);
2291 handler_installed = 0;
2293 #endif
2295 /* Manage enqueued signals */
2296 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2298 __mf_sigusr1_handled ++;
2299 assert (__mf_get_state () == reentrant);
2300 __mfu_report ();
2301 handler_installed = 0; /* We may need to re-install the handler: SysV signal() semantics reset it to SIG_DFL after each delivery. */
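/* Example (editorial): with the sigusr1_report option enabled, another
   process can request a live report from a running program; the next
   libmudflap entry point then calls __mfu_report().  Here target_pid is
   a hypothetical pid of the process under test:

     kill (target_pid, SIGUSR1);
*/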
2306 /* XXX: provide an alternative __assert_fail function that cannot
2307 fail due to libmudflap infinite recursion. */
2308 #ifndef NDEBUG
2310 static void
2311 write_itoa (int fd, unsigned n)
2313 enum x { bufsize = sizeof(n)*4 };
2314 char buf [bufsize];
2315 unsigned i;
2317 for (i=0; i<bufsize-1; i++)
2319 unsigned digit = n % 10;
2320 buf[bufsize-2-i] = digit + '0';
2321 n /= 10;
2322 if (n == 0)
2324 char *m = & buf [bufsize-2-i];
2325 buf[bufsize-1] = '\0';
2326 write (fd, m, strlen(m));
2327 break;
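/* Editorial example: write_itoa (2, 1234) writes the characters "1234"
   to stderr.  It builds the string in a fixed stack buffer and uses
   only write(), with no stdio or malloc, so it cannot re-enter the
   libmudflap wrappers from the assertion path below.  */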
2333 void
2334 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2336 #define write2(string) write (2, (string), strlen ((string)))
2337 write2("mf");
2338 #ifdef LIBMUDFLAPTH
2339 write2("(");
2340 write_itoa (2, (unsigned) pthread_self ());
2341 write2(")");
2342 #endif
2343 write2(": assertion failure: `");
2344 write (2, msg, strlen (msg));
2345 write2("' in ");
2346 write (2, func, strlen (func));
2347 write2(" at ");
2348 write (2, file, strlen (file));
2349 write2(":");
2350 write_itoa (2, line);
2351 write2("\n");
2352 #undef write2
2353 abort ();
2357 #endif
2361 /* Adapted splay tree code, originally from libiberty. It has been
2362 specialized for libmudflap as requested by RMS. */
2364 static void
2365 mfsplay_tree_free (void *p)
2367 DECLARE (void, free, void *p);
2368 CALL_REAL (free, p);
2371 static void *
2372 mfsplay_tree_xmalloc (size_t s)
2374 DECLARE (void *, malloc, size_t s);
2375 return CALL_REAL (malloc, s);
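/* Editorial note: allocating through CALL_REAL bypasses the libmudflap
   wrappers, so the splay tree's own bookkeeping memory is never entered
   into the object database it implements; going through the wrapped
   malloc/free would recurse back into the tree.  */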
2379 static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2380 static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2381 mfsplay_tree_key,
2382 mfsplay_tree_node *,
2383 mfsplay_tree_node *,
2384 mfsplay_tree_node *);
2387 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2388 and grandparent, respectively, of NODE. */
2390 static mfsplay_tree_node
2391 mfsplay_tree_splay_helper (mfsplay_tree sp,
2392 mfsplay_tree_key key,
2393 mfsplay_tree_node * node,
2394 mfsplay_tree_node * parent,
2395 mfsplay_tree_node * grandparent)
2397 mfsplay_tree_node *next;
2398 mfsplay_tree_node n;
2399 int comparison;
2401 n = *node;
2403 if (!n)
2404 return *parent;
2406 comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));
2408 if (comparison == 0)
2409 /* We've found the target. */
2410 next = 0;
2411 else if (comparison < 0)
2412 /* The target is to the left. */
2413 next = &n->left;
2414 else
2415 /* The target is to the right. */
2416 next = &n->right;
2418 if (next)
2420 /* Check whether our recursion depth is too high. Abort this search,
2421 and signal that a rebalance is required to continue. */
2422 if (sp->depth > sp->max_depth)
2424 sp->rebalance_p = 1;
2425 return n;
2428 /* Continue down the tree. */
2429 sp->depth ++;
2430 n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2431 sp->depth --;
2433 /* The recursive call will change the place to which NODE
2434 points. */
2435 if (*node != n || sp->rebalance_p)
2436 return n;
2439 if (!parent)
2440 /* NODE is the root. We are done. */
2441 return n;
2443 /* First, handle the case where there is no grandparent (i.e.,
2444 *PARENT is the root of the tree.) */
2445 if (!grandparent)
2447 if (n == (*parent)->left)
2449 *node = n->right;
2450 n->right = *parent;
2452 else
2454 *node = n->left;
2455 n->left = *parent;
2457 *parent = n;
2458 return n;
2461 /* Next handle the cases where both N and *PARENT are left children,
2462 or where both are right children. */
2463 if (n == (*parent)->left && *parent == (*grandparent)->left)
2465 mfsplay_tree_node p = *parent;
2467 (*grandparent)->left = p->right;
2468 p->right = *grandparent;
2469 p->left = n->right;
2470 n->right = p;
2471 *grandparent = n;
2472 return n;
2474 else if (n == (*parent)->right && *parent == (*grandparent)->right)
2476 mfsplay_tree_node p = *parent;
2478 (*grandparent)->right = p->left;
2479 p->left = *grandparent;
2480 p->right = n->left;
2481 n->left = p;
2482 *grandparent = n;
2483 return n;
2486 /* Finally, deal with the case where N is a left child, but *PARENT
2487 is a right child, or vice versa. */
2488 if (n == (*parent)->left)
2490 (*parent)->left = n->right;
2491 n->right = *parent;
2492 (*grandparent)->right = n->left;
2493 n->left = *grandparent;
2494 *grandparent = n;
2495 return n;
2497 else
2499 (*parent)->right = n->left;
2500 n->left = *parent;
2501 (*grandparent)->left = n->right;
2502 n->right = *grandparent;
2503 *grandparent = n;
2504 return n;
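/* Editorial illustration of the left zig-zig case above (N and *PARENT
   both left children); lowercase letters are subtrees:

            G                N
           / \              / \
          P   d            a   P
         / \        =>        / \
        N   c                b   G
       / \                      / \
      a   b                    c   d
*/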
2510 static int
2511 mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2513 mfsplay_tree_node **p = array_ptr;
2514 *(*p) = n;
2515 (*p)++;
2516 return 0;
2520 static mfsplay_tree_node
2521 mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2522 unsigned high)
2524 unsigned middle = low + (high - low) / 2;
2525 mfsplay_tree_node n = array[middle];
2527 /* Note that since we're producing a balanced binary tree, it is not a problem
2528 that this function is recursive. */
2529 if (low + 1 <= middle)
2530 n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2531 else
2532 n->left = NULL;
2534 if (middle + 1 <= high)
2535 n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2536 else
2537 n->right = NULL;
2539 return n;
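/* Editorial example: for a five-node array [n0 n1 n2 n3 n4] the helper
   picks n2 as the root and recurses on each half, producing:

            n2
           /  \
         n0    n3
           \     \
           n1    n4
*/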
2543 /* Rebalance the entire tree. Do this by copying all the node
2544 pointers into an array, then cleverly re-linking them. */
2545 static void
2546 mfsplay_tree_rebalance (mfsplay_tree sp)
2548 mfsplay_tree_node *all_nodes, *all_nodes_1;
2550 if (sp->num_keys <= 2)
2551 return;
2553 all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2555 /* Traverse all nodes to copy their addresses into this array. */
2556 all_nodes_1 = all_nodes;
2557 mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2558 (void *) &all_nodes_1);
2560 /* Relink all the nodes. */
2561 sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2563 mfsplay_tree_free (all_nodes);
2567 /* Splay SP around KEY. */
2568 static void
2569 mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
2571 if (sp->root == 0)
2572 return;
2574 /* If we just splayed the tree with the same key, do nothing. */
2575 if (sp->last_splayed_key_p &&
2576 (sp->last_splayed_key == key))
2577 return;
2579 /* Impose a maximum recursion depth on the splay operation.
2580 The idea is to limit excessive stack usage if we're facing
2581 degenerate access patterns. Unfortunately such patterns can occur
2582 e.g. during static initialization, where many static objects might
2583 be registered in increasing address sequence, or when large
2584 tree-like heap data structures are allocated in quick succession.
2586 On x86, a depth of 2500 corresponds to roughly 200K of stack usage.
2587 XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
2588 sp->max_depth = 2500;
2589 sp->rebalance_p = sp->depth = 0;
2591 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2592 if (sp->rebalance_p)
2594 mfsplay_tree_rebalance (sp);
2596 sp->rebalance_p = sp->depth = 0;
2597 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2599 if (sp->rebalance_p)
2600 abort ();
2604 /* Cache this splay key. */
2605 sp->last_splayed_key = key;
2606 sp->last_splayed_key_p = 1;
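/* Design note (editorial): a freshly rebalanced tree has O(log n)
   height, so the retried splay above cannot exceed max_depth again;
   seeing rebalance_p set twice therefore indicates internal corruption,
   hence the abort().  */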
2611 /* Allocate a new splay tree. */
2612 static mfsplay_tree
2613 mfsplay_tree_new ()
2615 mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2616 sp->root = NULL;
2617 sp->last_splayed_key_p = 0;
2618 sp->num_keys = 0;
2620 return sp;
2625 /* Insert a new node (associating KEY with DATA) into SP. If a
2626 previous node with the indicated KEY exists, its data is replaced
2627 with the new value. Returns the new node. */
2628 static mfsplay_tree_node
2629 mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2631 int comparison = 0;
2633 mfsplay_tree_splay (sp, key);
2635 if (sp->root)
2636 comparison = ((sp->root->key > key) ? 1 :
2637 ((sp->root->key < key) ? -1 : 0));
2639 if (sp->root && comparison == 0)
2641 /* If the root of the tree already has the indicated KEY, just
2642 replace the value with VALUE. */
2643 sp->root->value = value;
2645 else
2647 /* Create a new node, and insert it at the root. */
2648 mfsplay_tree_node node;
2650 node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2651 node->key = key;
2652 node->value = value;
2653 sp->num_keys++;
2654 if (!sp->root)
2655 node->left = node->right = 0;
2656 else if (comparison < 0)
2658 node->left = sp->root;
2659 node->right = node->left->right;
2660 node->left->right = 0;
2662 else
2664 node->right = sp->root;
2665 node->left = node->right->left;
2666 node->right->left = 0;
2669 sp->root = node;
2670 sp->last_splayed_key_p = 0;
2673 return sp->root;
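/* Usage sketch (editorial), assuming objects are keyed by their low
   address as in the object database elsewhere in this file:

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low, (void *) obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) obj->low);
     mfsplay_tree_remove (t, (mfsplay_tree_key) obj->low);

   after the lookup, n->value is (void *) obj; after the remove, a
   further lookup returns 0.  */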
2676 /* Remove KEY from SP. It is not an error if it did not exist. */
2678 static void
2679 mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2681 mfsplay_tree_splay (sp, key);
2682 sp->last_splayed_key_p = 0;
2683 if (sp->root && (sp->root->key == key))
2685 mfsplay_tree_node left, right;
2686 left = sp->root->left;
2687 right = sp->root->right;
2688 /* Delete the root node itself. */
2689 mfsplay_tree_free (sp->root);
2690 sp->num_keys--;
2691 /* One of the children is now the root. Doesn't matter much
2692 which, so long as we preserve the properties of the tree. */
2693 if (left)
2695 sp->root = left;
2696 /* If there was a right child as well, hang it off the
2697 right-most leaf of the left child. */
2698 if (right)
2700 while (left->right)
2701 left = left->right;
2702 left->right = right;
2705 else
2706 sp->root = right;
2710 /* Look up KEY in SP, returning its node if present, and NULL
2711 otherwise. */
2713 static mfsplay_tree_node
2714 mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2716 mfsplay_tree_splay (sp, key);
2717 if (sp->root && (sp->root->key == key))
2718 return sp->root;
2719 else
2720 return 0;
2724 /* Return the node holding the immediate predecessor of KEY, or NULL
2725 if there is no predecessor. KEY need not be present in the tree. */
2727 static mfsplay_tree_node
2728 mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2730 int comparison;
2731 mfsplay_tree_node node;
2732 /* If the tree is empty, there is certainly no predecessor. */
2733 if (!sp->root)
2734 return NULL;
2735 /* Splay the tree around KEY. That will leave either the KEY
2736 itself, its predecessor, or its successor at the root. */
2737 mfsplay_tree_splay (sp, key);
2738 comparison = ((sp->root->key > key) ? 1 :
2739 ((sp->root->key < key) ? -1 : 0));
2741 /* If the predecessor is at the root, just return it. */
2742 if (comparison < 0)
2743 return sp->root;
2744 /* Otherwise, find the rightmost element of the left subtree. */
2745 node = sp->root->left;
2746 if (node)
2747 while (node->right)
2748 node = node->right;
2749 return node;
2752 /* Return the node holding the immediate successor of KEY, or NULL
2753 if there is no successor. KEY need not be present in the tree. */
2755 static mfsplay_tree_node
2756 mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2758 int comparison;
2759 mfsplay_tree_node node;
2760 /* If the tree is empty, there is certainly no successor. */
2761 if (!sp->root)
2762 return NULL;
2763 /* Splay the tree around KEY. That will leave either the KEY
2764 itself, its predecessor, or its successor at the root. */
2765 mfsplay_tree_splay (sp, key);
2766 comparison = ((sp->root->key > key) ? 1 :
2767 ((sp->root->key < key) ? -1 : 0));
2768 /* If the successor is at the root, just return it. */
2769 if (comparison > 0)
2770 return sp->root;
2771 /* Otherwise, find the leftmost element of the right subtree. */
2772 node = sp->root->right;
2773 if (node)
2774 while (node->left)
2775 node = node->left;
2776 return node;
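/* Editorial example: in a tree holding keys {10, 20, 30}, probing with
   key 25 (not present) yields the node for 20 from
   mfsplay_tree_predecessor and the node for 30 from
   mfsplay_tree_successor.  */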
2779 /* Call FN, passing it the DATA, for every node in SP, following an
2780 in-order traversal. If FN ever returns a non-zero value, the
2781 iteration ceases immediately, and the value is returned.
2782 Otherwise, this function returns 0.
2784 This function simulates recursion using dynamically allocated
2785 arrays, since it may be called from mfsplay_tree_rebalance(), which
2786 in turn means that the tree is already uncomfortably deep for stack
2787 space limits. */
2788 static int
2789 mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
2791 mfsplay_tree_node *stack1;
2792 char *stack2;
2793 unsigned sp;
2794 int val = 0;
2795 enum s { s_left, s_here, s_right, s_up };
2797 if (st->root == NULL) /* => num_keys == 0 */
2798 return 0;
2800 stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
2801 stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);
2803 sp = 0;
2804 stack1 [sp] = st->root;
2805 stack2 [sp] = s_left;
2807 while (1)
2809 mfsplay_tree_node n;
2810 enum s s;
2812 n = stack1 [sp];
2813 s = stack2 [sp];
2815 /* Handle each of the four possible states separately. */
2817 /* 1: We're here to traverse the left subtree (if any). */
2818 if (s == s_left)
2820 stack2 [sp] = s_here;
2821 if (n->left != NULL)
2823 sp ++;
2824 stack1 [sp] = n->left;
2825 stack2 [sp] = s_left;
2829 /* 2: We're here to traverse this node. */
2830 else if (s == s_here)
2832 stack2 [sp] = s_right;
2833 val = (*fn) (n, data);
2834 if (val) break;
2837 /* 3: We're here to traverse the right subtree (if any). */
2838 else if (s == s_right)
2840 stack2 [sp] = s_up;
2841 if (n->right != NULL)
2843 sp ++;
2844 stack1 [sp] = n->right;
2845 stack2 [sp] = s_left;
2849 /* 4: We're here after both subtrees (if any) have been traversed. */
2850 else if (s == s_up)
2852 /* Pop the stack. */
2853 if (sp == 0) break; /* Popping off the root node: we're finished! */
2854 sp --;
2857 else
2858 abort ();
2861 mfsplay_tree_free (stack1);
2862 mfsplay_tree_free (stack2);
2863 return val;
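/* Usage sketch (editorial; count_cb is a hypothetical callback):
   counting nodes with the iterator, which visits keys in ascending
   order:

     static int
     count_cb (mfsplay_tree_node n, void *data)
     {
       (*(unsigned *) data) ++;
       return 0;
     }

     unsigned count = 0;
     mfsplay_tree_foreach (sp, count_cb, &count);

   a non-zero return from the callback stops the traversal early and
   becomes the return value of mfsplay_tree_foreach.  */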