/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)
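
/* Editorial sketch (not part of the original source) of how the
   exported entry points below pair these macros with the big lock;
   LOCKTH/UNLOCKTH are assumed to come from mf-impl.h:

     LOCKTH ();                    // take __mf_biglock in the threaded build
     BEGIN_RECURSION_PROTECT ();   // mark state reentrant; abort if already so
     ... guarded work, e.g. __mfu_check () ...
     END_RECURSION_PROTECT ();     // state back to active
     UNLOCKTH ();

   While the state is `reentrant', the hook wrappers can consult
   __mf_get_state () and avoid re-instrumenting libc calls made by the
   runtime's own bookkeeping.  See __mf_check/__mf_register below.  */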
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
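
/* Editorial note: the index computation itself, __MF_CACHE_INDEX, is
   defined in one of the mf-*.h headers included above; an illustrative
   sketch (not a quote) is:

     idx = ((uintptr_t) ptr >> __mf_lc_shift) & __mf_lc_mask;

   With the defaults above (shift 2, mask 1023), addresses four bytes
   apart map to adjacent slots and the table wraps every 4 KB of
   address space.  __mf_adapt_cache () below retunes both parameters
   at runtime.  */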

struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#ifdef LIBMUDFLAPTH
#ifdef HAVE_TLS
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#endif
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif

/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif

/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];

/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);

/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}

static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
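
/* Example (illustrative, not from the original source): given the table
   above, the following environment setting selects checking mode,
   abort-on-violation, and an 8-level backtrace:

     $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8'

   and `-no-print-leaks' would clear the print_leaks flag via the
   `negate' path in __mfu_set_options () below.  */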

static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}

int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
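
/* Worked example (editorial, not in the original): parsing
   "-crumple-zone=64 -no-wipe-heap" walks the options[] table twice.
   The first token matches the read_integer_option entry for
   "crumple-zone" and strtol stores 64 into __mf_opts.crumple_zone;
   the second strips "no-", matches the set_option entry for
   "wipe-heap" with negate=1, and zeroes __mf_opts.wipe_heap.
   An unrecognized token only warns and makes this function return -1,
   which in turn makes __mf_init () print __mf_usage () and exit.  */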

#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
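
/* Editorial sketch of the interposition pattern (the macro bodies are
   assumed to live in mf-impl.h, not quoted here): DECLARE introduces a
   typed declaration for a libc symbol, and CALL_REAL dispatches through
   the __mf_dynamic[] pointers resolved above, so the runtime can reach
   the real allocator while application calls land in the wrappers:

     DECLARE (void *, malloc, size_t c);
     void *p = CALL_REAL (malloc, 100);   // real malloc, not the wrapper

   This is why __mf_resolve_dynamics () must run during __mf_init ()
   before any bookkeeping allocation is attempted.  */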

/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}


/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done.  */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
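
/* Illustrative expansion (editorial): the REG_RESERVED invocation above
   for __mf_lc_mask becomes

     __mf_register (& __mf_lc_mask, sizeof(__mf_lc_mask),
                    __MF_TYPE_NOACCESS, "__mf_lc_mask");

   i.e. the runtime's own control variables are registered as no-access
   objects, so a stray application write into them is itself reported
   as a violation.  */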

int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}


extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
/* Since we didn't populate the tree for allocations in constructors
   before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}

/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad. */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad. */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
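
/* Orientation sketch (editorial; it assumes the behavior of the
   tree-mudflap instrumentation pass, which lives outside this file):
   compiled code does not call __mf_check unconditionally.  It first
   consults the lookup cache inline, roughly:

     struct __mf_cache *e = & __mf_lookup_cache [__MF_CACHE_INDEX (ptr)];
     if (UNLIKELY (e->low > ptr_low || e->high < ptr_high))
       __mf_check (ptr, sz, type, location);

   which is why a successful mode_check judgement above stores the
   containing object's bounds back into *entry: subsequent in-bounds
   accesses then hit the cache and skip this function entirely.  */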

static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}

static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      /* As reported by Herman ten Brugge, we need to scan the entire
         cache for entries that may hit this object. */
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry = & __mf_lookup_cache [0];
      unsigned i;
      for (i = 0; i <= __mf_lc_mask; i++, entry++)
        {
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}

void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}

void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}

struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}

static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
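
/* Illustrative example (editorial): if the lively objects are heap
   blocks whose base addresses are mostly 64 bytes apart, then bit 6 of
   those addresses is a balanced discriminant, the shoulder loop above
   settles near shift=6, and neighboring objects land in distinct cache
   slots instead of colliding in the low address bits.  */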

/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
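
/* Numeric example (editorial): with live objects [0x1000,0x10ff] and
   [0x1100,0x11ff] in the tree, a query for [0x10f0,0x110f] first looks
   up key 0x10f0 (no exact-base match), then walks successors and
   predecessors from that key.  Both neighbors satisfy
   obj->low <= ptr_high && obj->high >= ptr_low, so count becomes 2.  */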

unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}

/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    {
      return 0;
    }
}
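
/* Editorial note on the widening above: recollection takes the values
   0, 1, 3, 7, 15, ..., so each pass examines 1, 2, 4, 8, ... plots per
   row.  A recently buried object is therefore found cheaply, while an
   old one costs a few more passes, up to persistent_count entries back
   in each circular row.  */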

/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}

static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}

/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "      lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "  zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}

/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking.  */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers.  */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
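
/* Worked example (illustrative only) of the widening search above.
   Each failed try widens both ends of the window by sz * tries^2, so
   after t failed tries the total widening on each side is
   sz * (1^2 + 2^2 + ... + t^2) = sz * t*(t+1)*(2t+1)/6, clamped to the
   address space by CLAMPSUB/CLAMPADD.  With sz = 4:
       after 1 try:   +/- 4 bytes
       after 2 tries: +/- 20 bytes
       after 3 tries: +/- 56 bytes
       after all 15:  +/- 4960 bytes
   so small accesses only ever report genuinely nearby objects, while
   large accesses scale the search window accordingly.  */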
/* ------------------------------------------------------------------------ */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();  /* Added for symmetry with __mf_watch:
                                  __mf_watch_or_not calls the real
                                  malloc/free, and __mf_watch runs it
                                  with recursion protection held.  */
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
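
/* Usage sketch (illustrative only, not compiled into the library):
   a program built with -fmudflap can request a violation report on the
   next checked access to an object, then cancel the request.  The
   function name is hypothetical.  */
#if 0
#include <stdlib.h>
#include <mf-runtime.h>
static void
watch_example (void)
{
  char *p = malloc (16);
  __mf_watch (p, 16);      /* next checked access to p[0..15] reports */
  /* ... exercise the code under suspicion ... */
  __mf_unwatch (p, 16);    /* stop watching */
  free (p);
}
#endif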
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}


void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library.  */
    }
}
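
/* Illustrative sketch: when the option behind __mf_opts.sigusr1_report
   is enabled in MUDFLAP_OPTIONS, an outside process can ask a running
   mudflap'd program for a live report by sending it SIGUSR1.
   TARGET_PID here is hypothetical.  */
#if 0
  kill (target_pid, SIGUSR1);   /* or, from a shell:  kill -USR1 <pid>  */
#endif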
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

/* Write the decimal representation of N to FD.  Uses only write()
   and strlen(), not stdio or malloc, so it cannot recurse back into
   libmudflap.  */
static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
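
/* Worked example: write_itoa (2, 305) fills the buffer backwards from
   the next-to-last slot, giving ... '3' '0' '5' '\0' at the end of the
   buffer (with a 32-bit unsigned, bufsize is 16, so digits land in
   buf[12..14] and the NUL in buf[15]), then writes the string "305"
   to stderr in a single write() call.  */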
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
/* Note: no trailing semicolon in the macro body, so each use below
   expands to exactly one statement.  */
#define write2(string) write (2, (string), strlen ((string)))
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif



/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}


static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);

/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
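
/* Illustrative diagrams (not from the original source) for the rotation
   shapes handled above, splaying node N upward; lowercase letters are
   arbitrary subtrees:

   zig (no grandparent):        zig-zig (N and P both left children):

        P           N               G                N
       / \         / \             / \              / \
      N   c  =>   a   P           P   d            a   P
     / \             / \         / \        =>        / \
    a   b           b   c       N   c                b   G
                               / \                      / \
                              a   b                    c   d

   The zig-zag case (N a left child of a right-child P, or vice versa)
   hoists N above both P and G in one step, like a double rotation.
   The mirrored variants exchange left and right throughout.  */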
/* Append the node N to the array that *ARRAY_PTR points into, and
   advance the cursor; used via mfsplay_tree_foreach to collect all
   node pointers in key order.  */
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}

/* Rebuild a perfectly balanced subtree from the sorted node array
   slice ARRAY[LOW..HIGH], using the midpoint as the subtree root.  */
static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}


/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
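
/* Worked example: rebalancing a degenerate 7-node chain.  The in-order
   walk yields the sorted node array {1,2,3,4,5,6,7}; helper2 then picks
   the midpoint of each subrange as the subtree root:

       1                          4
        \                       /   \
         2                     2     6
          \          =>       / \   / \
           ...               1   3 5   7
            \
             7 (chain)

   producing a tree of minimal height regardless of the original shape.  */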
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for the splay.  The idea is to
     limit excessive stack usage if we're facing degenerate access
     patterns.  Unfortunately such patterns can occur e.g. during
     static initialization, where many static objects might be
     registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
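
/* The control flow above, as an illustrative pseudo-code summary:

     splay (key):
       attempt the splay with recursion depth capped at 2500
       if the cap was hit:
         rebuild the tree perfectly balanced (height <= ceil(log2 n))
         retry the splay
         a second cap hit is impossible for any realizable n,
         so it indicates corruption and aborts

   This bounds stack usage even under degenerate (sorted) insertion
   patterns, at the cost of an occasional O(n) rebuild.  */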
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}


/* Insert a new node (associating KEY with VALUE) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the resulting root node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
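
/* Worked example: inserting key 5 when the post-splay root is 3
   (comparison < 0, since the root key is smaller).  The old root
   becomes the new node's left child, and the old root's right
   subtree R -- everything greater than 3 -- is re-hung as the new
   node's right child:

        3                 5
       / \               / \
      a   R     =>      3   R
                       /
                      a
   */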
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
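
/* Worked example: removing key 4 after the splay has brought it to
   the root.  The left child (2) becomes the new root, and the right
   subtree (6) is hung off the rightmost leaf (3) of that left child:

        4                 2
       / \               / \
      2   6     =>      1   3
     / \                     \
    1   3                     6
   */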
/* Look up KEY in SP, returning its node if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}


/* Return the node holding the immediate predecessor of KEY, or NULL
   if there is no predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}

/* Return the node holding the immediate successor of KEY, or NULL
   if there is no successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}
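
/* Usage note (illustrative): predecessor/successor give range queries
   over keys, and the query key need not be in the tree.  With keys
   {10, 20, 30}:
     mfsplay_tree_predecessor (sp, 25)  -> node with key 20
     mfsplay_tree_successor  (sp, 25)  -> node with key 30
     mfsplay_tree_successor  (sp, 30)  -> NULL  (30 is the maximum)
   */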
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
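
/* Illustrative use of the explicit-stack traversal above: counting the
   nodes of a tree without any recursion.  The callback name is
   hypothetical, not part of the library.  */
#if 0
static int
count_node_example (mfsplay_tree_node n, void *data)
{
  (*(unsigned *) data)++;
  return 0;                     /* zero => keep iterating */
}

/* ...
   unsigned count = 0;
   mfsplay_tree_foreach (sp, count_node_example, &count);
   ... */
#endif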