/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation. */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree. */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree. */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree. */
struct mfsplay_tree_node_s
{
  /* Data. */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children. */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion. */
};

/* The splay tree itself. */
struct mfsplay_tree_s
{
  /* The root of the tree. */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified. */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics. */
  unsigned num_keys;

  /* Traversal recursion control flags. */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
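
/* Illustrative sketch (hypothetical usage, mirroring the real call sites
   further below): each node is keyed by an object's low address, so a
   lookup by address recovers the __mf_object_t stored as the value:

       mfsplay_tree t = mfsplay_tree_new ();
       mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low,
                            (mfsplay_tree_value) obj);
       mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) addr);
       if (n != NULL)
         obj = (__mf_object_t *) n->value;

   See __mf_link_object and __mf_find_objects2 for the real call sites. */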
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)
/* ------------------------------------------------------------------------ */
/* Required globals. */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
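
/* Illustrative note (an assumption about mf-runtime.h, which defines
   __MF_CACHE_INDEX): the cache is indexed by the usual
   ((uintptr_t) ptr >> __mf_lc_shift) & __mf_lc_mask computation, so with
   the defaults above (shift=2, mask=1023) a pointer such as 0x1004
   hashes to ((0x1004 >> 2) & 0x3ff) == (0x401 & 0x3ff) == 1.  Each slot
   caches one valid [low,high] object range; repeated accesses to the
   same object hit the cache and skip the splay-tree search. */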
struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#ifdef LIBMUDFLAPTH
#ifdef HAVE_TLS
__thread enum __mf_state_enum __mf_state_1 = active;
#endif
#else
enum __mf_state_enum __mf_state_1 = active;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below. */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals. */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals. */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object. */
  unsigned write_count; /* Likewise for __mf_check/write. */
  unsigned liveness; /* A measure of recent checking activity. */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address). */
/* Actually stored as static vars within lookup function below. */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
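
/* Illustrative examples (not in the original source): the table above is
   parsed from the MUDFLAP_OPTIONS environment variable by
   __mfu_set_options below.  Boolean options may be negated with a `no-'
   prefix and integer options take an `=N' suffix, e.g.:

       MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-heur-stdlib'
*/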
static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad. */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*optstr+1)
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit. */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed. */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees. */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}
/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics? */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
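
/* Illustrative sketch (hypothetical caller, not part of this file):
   instrumented or hand-written code uses the public entry points in the
   natural register/check/unregister order, e.g.:

       char buf[16];
       __mf_register (buf, sizeof buf, __MF_TYPE_GUESS, "example buf");
       __mf_check (buf, 1, __MF_CHECK_READ, "example.c:1 (demo)");
       __mf_unregister (buf, sizeof buf, __MF_TYPE_GUESS);
*/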
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays. */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
/* Since we didn't populate the tree for allocations in constructors
   before __mf_init, we cannot check destructors after __mf_fini. */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters. */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered. */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one. */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again. */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics. */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases. */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad. */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad. */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects. */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object? */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access. */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across. */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side. */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered. */
                if (uncovered == 0)
                  judgement = 1;
              }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time. */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }

      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
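
/* Illustrative note (an assumption about the compiler side, not code from
   this file): the tree-mudflap instrumentation pass emits an inline hit
   test against __mf_lookup_cache and only calls __mf_check on a miss,
   roughly:

       struct __mf_cache *e = & __mf_lookup_cache [__MF_CACHE_INDEX (p)];
       if (UNLIKELY (e->low > (uintptr_t) p
                     || e->high < (uintptr_t) p + sz - 1))
         __mf_check (p, sz, __MF_CHECK_READ, "file:line (expr)");

   which is why __mfu_check above refills entry->low/high only for
   fully-valid accesses. */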
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache. */

  /* Can it possibly exist in the cache? */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      /* As reported by Herman ten Brugge, we need to scan the entire
         cache for entries that may hit this object. */
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry = & __mf_lookup_cache [0];
      unsigned i;
      for (i = 0; i <= __mf_lc_mask; i++, entry++)
        {
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1. */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps. */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs. */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units. */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation. */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK. */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable. */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed. */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired. */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary. */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch. */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj. */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj. */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  CALL_REAL(free, del_obj->dealloc_backtrace);
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects. */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value. */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero. */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects. */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too. */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets. */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache. */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed. */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
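
/* Illustrative worked example (hypothetical numbers): if bit 4 of the
   live objects' low addresses is evenly split, say
   weighted_address_bits[4] == {50, 50}, while every other bit is skewed
   like {100, 0}, then bit 4 scores 50*50 == 2500 and all others score 0,
   so the loops above pick i == 4 and smoothed_new_shift drifts toward 4.
   The cache index then discards the low-order address bits that do not
   discriminate between objects. */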
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit. */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin. */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps. */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
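
/* Illustrative sketch (mirroring the real call site in __mfu_check above):
   callers typically probe with a one-element array and fall back to an
   exact-sized buffer only when more overlaps exist:

       __mf_object_t *one[1];
       unsigned n = __mf_find_objects (low, high, one, 1);
       if (n > 1)
         {
           __mf_object_t **all = CALL_REAL (malloc, n * sizeof (*all));
           ... repeat the search with (all, n), then CALL_REAL (free, all) ...
         }
*/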
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time. */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer. */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object! */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time. */
          recollection = (recollection * 2) + 1;
        }

      return count;
    } else {
      return 0;
    }
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats. */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks. */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch. */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter. */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0. :-( */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards. */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
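
/* Usage note (grounded in the call sites in this file): callers pass a
   guess_pc of the instrumented call site so that wrapper frames get
   trimmed, e.g. in __mf_violation below:

       char **symbols;
       unsigned num = __mf_backtrace (& symbols, (void *) pc, 2);
       for (i = 0; i < num; i++)
         fprintf (stderr, "      %s\n", symbols[i]);
       CALL_REAL (free, symbols);
*/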
1951 /* ------------------------------------------------------------------------ */
1952 /* __mf_violation */
1954 void
1955 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1956 const char *location, int type)
1958 char buf [128];
1959 static unsigned violation_number;
1960 DECLARE(void, free, void *ptr);
1962 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1963 (void *) pc,
1964 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
1966 if (__mf_opts.collect_stats)
1967 __mf_count_violation [(type < 0) ? 0 :
1968 (type > __MF_VIOL_WATCH) ? 0 :
1969 type] ++;
1971 /* Print out a basic warning message. */
1972 if (__mf_opts.verbose_violations)
1974 unsigned dead_p;
1975 unsigned num_helpful = 0;
1976 struct timeval now = { 0, 0 };
1977 #if HAVE_GETTIMEOFDAY
1978 gettimeofday (& now, NULL);
1979 #endif
1981 violation_number ++;
1982 fprintf (stderr,
1983 "*******\n"
1984 "mudflap violation %u (%s): time=%lu.%06lu "
1985 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1986 violation_number,
1987 ((type == __MF_VIOL_READ) ? "check/read" :
1988 (type == __MF_VIOL_WRITE) ? "check/write" :
1989 (type == __MF_VIOL_REGISTER) ? "register" :
1990 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1991 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1992 now.tv_sec, now.tv_usec,
1993 (void *) ptr, (unsigned long)sz, (void *) pc,
1994 (location != NULL ? " location=`" : ""),
1995 (location != NULL ? location : ""),
1996 (location != NULL ? "'" : ""));
1998 if (__mf_opts.backtrace > 0)
2000 char ** symbols;
2001 unsigned i, num;
2003 num = __mf_backtrace (& symbols, (void *) pc, 2);
2004 /* Note: backtrace_symbols calls malloc(). But since we're in
2005 __mf_violation and presumably __mf_check, it'll detect
2006 recursion, and not put the new string into the database. */
2008 for (i=0; i<num; i++)
2009 fprintf (stderr, " %s\n", symbols[i]);
2011 /* Calling free() here would trigger a violation. */
2012 CALL_REAL(free, symbols);
2016 /* Look for nearby objects. For this, we start with s_low/s_high
2017 pointing to the given area, looking for overlapping objects.
2018 If none show up, widen the search area and keep looking. */
2020 if (sz == 0) sz = 1;
2022 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2024 enum {max_objs = 3}; /* magic */
2025 __mf_object_t *objs[max_objs];
2026 unsigned num_objs = 0;
2027 uintptr_t s_low, s_high;
2028 unsigned tries = 0;
2029 unsigned i;
2031 s_low = (uintptr_t) ptr;
2032 s_high = CLAMPSZ (ptr, sz);
2034 while (tries < 16) /* magic */
2036 if (dead_p)
2037 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2038 else
2039 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2041 if (num_objs) /* good enough */
2042 break;
2044 tries ++;
2046 /* XXX: tune this search strategy. It's too dependent on
2047 sz, which can vary from 1 to very big (when array index
2048 checking) numbers. */
2049 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2050 s_high = CLAMPADD (s_high, (sz * tries * tries));
          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
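
/* A minimal sketch of the fork()-based alternative proposed in the XXX
   note above -- illustrative only, not part of libmudflap; the
   `gdb_attached' flag and the error handling are hypothetical:

     static int gdb_attached;           // would need resetting if gdb exits
     if (! gdb_attached)
       {
         pid_t child = fork ();
         if (child == 0)
           {
             char pidbuf[32];
             snprintf (pidbuf, sizeof pidbuf, "--pid=%u",
                       (unsigned) getppid ());
             execlp ("gdb", "gdb", pidbuf, (char *) NULL);
             _exit (1);                 // reached only if execlp failed
           }
         else if (child > 0)
           {
             gdb_attached = 1;
             sleep (GDB_WAIT_PARAMETER); // give gdb time to attach
           }
       }

   This would let the violating process resume under gdb's supervision
   instead of blocking inside system() until gdb exits.  */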
/* ------------------------------------------------------------------------ */


unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  UNLOCKTH ();
  return rc;
}
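
/* Example use from application code (a sketch; these entry points are
   declared in mf-runtime.h):

     char *p = malloc (16);
     __mf_watch (p, 16);     // any later access to p[0..15] reports a
                             //   "watch" violation via __mf_violation
     ...
     __mf_unwatch (p, 16);   // accesses become ordinary checks again
*/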
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);
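
        /* Note the count-then-fill idiom above: calling __mf_find_objects
           with a NULL array just counts the overlapping objects, letting
           us size the malloc exactly before the second call fills it.  */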
        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable the signal; this might be a SysV library.  */
    }
}
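
/* Usage sketch: with __mf_opts.sigusr1_report enabled (set through the
   MUDFLAP_OPTIONS environment variable; see the option table elsewhere
   in this runtime), a live report can be requested from a shell without
   stopping the process:

     $ kill -USR1 <pid>    # the next libmudflap entry point notices the
                           #   pending flag and calls __mfu_report()

   The handler itself only increments __mf_sigusr1_received; the report
   runs later from __mf_sigusr1_respond, outside signal context, which
   keeps the handler async-signal-safe.  */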
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
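
/* Worked example: with n == 307 and a 32-bit unsigned, bufsize is 16.
   The loop stores '7', '0', '3' into buf[14], buf[13], buf[12]; once n
   reaches 0 it NUL-terminates buf[15] and write()s "307" starting at
   &buf[12].  Digits come out least-significant first, which is why the
   buffer fills from the rear.  No malloc() is involved, so this is safe
   to call while a violation is being reported.  */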
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif
/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}


static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);
/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
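
/* Worked example of the zig-zig case above (N and *PARENT both left
   children), splaying around N's key:

            g                       n
           / \                     / \
          p   d                   a   p
         / \         =====>          / \
        n   c                       b   g
       / \                             / \
      a   b                           c   d

   One such double rotation roughly halves the depth along the access
   path; the final (zig-zag) case instead hoists N directly above both
   *PARENT and *GRANDPARENT in a single step.  */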
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}


static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
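
/* For instance, given seven nodes collected in key order k0..k6, the
   helper picks k3 as the root, then recurses on [k0..k2] and [k4..k6]:

          k3
         /  \
       k1    k5
      / \    / \
     k0  k2 k4  k6

   The result has depth about log2(n), so the next splay starts from a
   tree of logarithmic depth and stays well inside the max_depth budget.  */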
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
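
/* Back-of-the-envelope check of the 200K figure above: 2500 nested
   mfsplay_tree_splay_helper frames at roughly 80 bytes of x86 stack
   apiece (five pointer arguments, a few locals, saved registers and a
   return address) comes to about 2500 * 80 = 200KB.  After a rebalance
   the tree depth is logarithmic, so the post-rebalance retry cannot
   plausibly hit the cap again -- hence the abort() as a can't-happen
   check.  */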
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}
/* Insert a new node (associating KEY with DATA) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
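
/* Minimal usage sketch of this splay-tree API, in the style the object
   database elsewhere in this file uses it (keys are addresses, values
   are object pointers; `some_object' is a stand-in for an __mf_object_t*):

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, some_object);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
     // n->value == some_object; the lookup also splays 0x1000 to the root
     mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);
*/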
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
/* Lookup KEY in SP, returning VALUE if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
/* Return the immediate predecessor KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}
/* Return the immediate successor KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}
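
/* Example of the neighbor queries: with keys {0x1000, 0x2000, 0x3000}
   in tree t,

     mfsplay_tree_predecessor (t, 0x2500)  returns the 0x2000 node
     mfsplay_tree_successor   (t, 0x2500)  returns the 0x3000 node
     mfsplay_tree_successor   (t, 0x3000)  returns NULL (no key > 0x3000)

   Range scans over the object database elsewhere in this file build on
   exactly this pattern: splay near the query key, then walk to the
   adjacent extremum.  */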
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
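
/* Trace of the state machine above on a two-node tree -- root r with a
   single left child c; each stack slot advances through the states
   s_left -> s_here -> s_right -> s_up:

     step  stack (node:state)      action taken this iteration
     1     r:s_left                mark r s_here, push c
     2     r:s_here  c:s_left      mark c s_here (c has no left child)
     3     r:s_here  c:s_here      fn(c)   <- in-order: c visited first
     4     r:s_here  c:s_right     mark c s_up (no right child)
     5     r:s_here  c:s_up        pop
     6     r:s_here                fn(r), mark r s_right
     7     r:s_right               mark r s_up (then done at r:s_up)

   Two parallel arrays of num_keys entries suffice because the explicit
   stack only ever holds a single root-to-node path.  */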