[official-gcc.git] / libmudflap / mf-runtime.c
/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
34 #include "config.h"
36 /* These attempt to coax various unix flavours to declare all our
37 needed tidbits in the system headers. */
38 #if !defined(__FreeBSD__) && !defined(__APPLE__)
39 #define _POSIX_SOURCE
40 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
41 #define _GNU_SOURCE
42 #define _XOPEN_SOURCE
43 #define _BSD_TYPES
44 #define __EXTENSIONS__
45 #define _ALL_SOURCE
46 #define _LARGE_FILE_API
47 #define _XOPEN_SOURCE_EXTENDED 1
49 #include <stdio.h>
50 #include <stdlib.h>
51 #include <sys/types.h>
52 #include <sys/time.h>
53 #include <time.h>
54 #include <unistd.h>
55 #ifdef HAVE_EXECINFO_H
56 #include <execinfo.h>
57 #endif
58 #ifdef HAVE_SIGNAL_H
59 #include <signal.h>
60 #endif
61 #include <assert.h>
63 #include <string.h>
64 #include <limits.h>
65 #include <sys/types.h>
66 #include <signal.h>
67 #include <errno.h>
68 #include <ctype.h>
70 #include "mf-runtime.h"
71 #include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant;  \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
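/* A minimal sketch of the intended calling pattern (as used by the
   public entry points below, e.g. __mf_check):

     LOCKTH ();
     BEGIN_RECURSION_PROTECT ();
     ... call the __mfu_* worker ...
     END_RECURSION_PROTECT ();
     UNLOCKTH ();

   so any hook re-entered from inside the runtime aborts loudly
   rather than recursing.  */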
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
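/* NB: a pointer maps to a cache slot via __MF_CACHE_INDEX (declared in
   mf-runtime.h), essentially ((uintptr_t) ptr >> __mf_lc_shift) &
   __mf_lc_mask, so adjusting __mf_lc_shift/__mf_lc_mask (see
   __mf_adapt_cache below) reshapes which addresses collide.  */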
struct __mf_options __mf_opts;

int __mf_starting_p = 1;
#ifndef LIBMUDFLAPTH
enum __mf_state_enum __mf_state = active;
#else
/* See __mf_state_perthread() in mf-hooks.c. */
#endif


#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
const void *threads_active_p = (void *) pthread_join;
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
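/* These cemetery rows are consulted by __mf_find_dead_objects below,
   so that a violation report can also mention recently unregistered
   (freed) objects near the faulting address.  */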
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
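/* Example (illustrative, not the built-in default): running with

     $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-print-leaks'

   exercises the `-OPTION', `-OPTION=N' and `-no-OPTION' forms that
   __mfu_set_options parses below.  */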
static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (threads_active_p ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}
/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
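/* NB: __mf_init is declared with CTOR above, so in the usual case it
   runs as an ELF constructor before main(); __wrap_main below then
   registers process-startup data (argv, environ, errno, stdio).  */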
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
/* Since we didn't populate the tree for allocations in constructors
   before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.
      */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad. */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad. */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans only valid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_higher >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgement is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
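/* NB (a sketch of the intended call path, an assumption about the
   compiler side rather than something shown in this file): code
   compiled with -fmudflap consults the inline __mf_lookup_cache fast
   path first and calls __mf_check only on a cache miss; the
   mode_populate and mode_check cases above then refill the missed
   cache entry so repeated accesses stay on the fast path.  */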
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      unsigned idx_low = __MF_CACHE_INDEX (low);
      unsigned idx_high = __MF_CACHE_INDEX (high);
      unsigned i;
      for (i = idx_low; i <= idx_high; i++)
        {
          struct __mf_cache *entry = & __mf_lookup_cache [i];
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0 &&
            old_obj->type >= 0 &&
            old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
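/* Illustrative arithmetic (not a measured trace): if every lively
   object's low address were 8-byte aligned, bits 0-2 would always be
   zero, so s.weighted_address_bits[i][0] * s.weighted_address_bits[i][1]
   would vanish for i < 3; the first bit passing the shoulder test
   above would then be about 3, and smoothed_new_shift would slowly
   drift toward a shift of 3.  */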
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */
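/* The search below widens exponentially: recollection takes the
   values 0, 1, 3, 7, ..., so each pass re-scans the freshest plots
   plus progressively older ones, until an overlap is found or the
   whole persistent_count history has been covered.  */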
static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    {
      return 0;
    }
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}
static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                        pc_array[n] = __builtin_return_address(n); \
                        if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter. */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0. :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
1945 /* ------------------------------------------------------------------------ */
1946 /* __mf_violation */
void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message. */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very large values (e.g. when
                 checking array indexes).  */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }
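  /* Editorial note on the search above: the probe window widens
     cumulatively, by sz*t*t bytes on each side at retry t, so after t
     retries it extends roughly sz*t^3/3 (exactly sz times the sum of
     k^2 for k = 1..t) beyond each end of the checked region -- about
     1240*sz per side by the final retry -- while the very first probe
     still catches exact overlaps cheaply.  */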
  /* How to finally handle this violation? */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:
      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
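#if 0
/* Editorial sketch of the fork()-based alternative described in the
   XXX comment above; hypothetical, and not wired into the build.  The
   child exec's gdb against the parent's pid while the parent sleeps
   long enough (the comment's GDB_WAIT_PARAMETER) for gdb to attach; a
   real version would also have to avoid spawning a second gdb while
   the first is still alive.  */
static void
__mf_violation_spawn_gdb (void)   /* hypothetical name */
{
  char pidbuf[32];
  snprintf (pidbuf, sizeof pidbuf, "--pid=%u", (unsigned) getpid ());
  if (fork () == 0)
    execlp ("gdb", "gdb", pidbuf, (char *) NULL);  /* child: becomes gdb */
  else
    sleep (10);  /* parent: pause so gdb can attach, then resume under it */
}
#endif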
/* ------------------------------------------------------------------------ */
unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT (); /* (editorial fix) mirror __mf_watch: the
                                 shared helper walks the object database
                                 and calls the real malloc/free.  */
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
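#if 0
/* Editorial usage sketch (hypothetical client code): once a region is
   watched, every checked access to it raises a __MF_VIOL_WATCH
   violation, because __mf_watch_or_not() above flips watching_p and
   evicts the object from the lookup cache so the next access must go
   through __mf_check().  */
static int interesting[16];

static void
watch_example (void)
{
  __mf_watch (interesting, sizeof interesting);   /* report every access */
  /* ... exercise the code under suspicion ... */
  __mf_unwatch (interesting, sizeof interesting); /* back to plain checking */
}
#endif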
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_state == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-install the handler:
                                under SysV signal semantics the
                                disposition may have been reset.  */
    }
}
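/* Editorial example: with "-sigusr1-report" among MUDFLAP_OPTIONS, a
   running process can be asked for a live report from a shell:

       kill -USR1 <pid>

   The handler above only counts the signal; the report itself is
   emitted the next time control passes through __mf_sigusr1_respond().  */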
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
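/* Editorial note on the sizing above: bufsize = sizeof(n)*4 gives 16
   bytes for a 32-bit unsigned -- comfortably more than the 10 digits
   of UINT_MAX plus the NUL.  Digits are deposited from the tail of
   buf, so the write() starts at the most significant digit.  Only
   write() and strlen() are used; nothing here can call back into the
   wrapped malloc/free, which is the whole point of this hand-rolled
   itoa.  */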
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif
/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}
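/* Editorial note: DECLARE/CALL_REAL (from mf-impl.h) route these
   through the underlying libc malloc/free rather than the mudflap
   wrappers, so the tree's own bookkeeping allocations can never
   recurse back into the checking machinery they implement.  */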
static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);
/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
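/* Editorial note: the three cases above are the classic splay
   rotations -- "zig" when *PARENT is the root, "zig-zig" when N and
   *PARENT are children on the same side, and "zig-zag" when they sit
   on opposite sides.  Each preserves the BST ordering while moving N
   two levels up (one level for zig), so the unwinding recursion hoists
   the target key to the root.  */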
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}
static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
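/* Editorial worked example: for five nodes, helper2(array, 0, 4) makes
   array[2] the root and recurses on [0,1] and [3,4]; those yield
   array[0] (with right child array[1]) and array[3] (with right child
   array[4]), i.e. a tree of depth 2 -- the minimum.  Because the array
   comes from an in-order walk, it is sorted by key, so the result is a
   valid, balanced BST.  */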
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for the splay operation.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key. */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
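/* Editorial arithmetic behind the "roughly 200K" above: with max_depth
   2500 and a mfsplay_tree_splay_helper frame of very roughly 80 bytes
   on 32-bit x86 (five arguments, a few locals, saved registers, return
   address), 2500 * 80 = 200,000 bytes of stack in the worst case before
   a rebalance is forced.  The per-frame size is an estimate, not
   something the code measures.  */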
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}
/* Insert a new node (associating KEY with VALUE) into SP.  If a
   previous node with the indicated KEY exists, its value is replaced
   with the new one.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
/* Look up KEY in SP, returning its node if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
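#if 0
/* Editorial usage sketch (not part of the library): the splay-tree API
   as the object database uses it -- keys are addresses, values are
   object pointers.  */
static void
mfsplay_tree_demo (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  int payload = 42;
  mfsplay_tree_node n;

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, &payload);
  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
  /* n != NULL and n->value == &payload; the lookup also splayed the
     key to the root, so an immediately repeated lookup is cheap.  */
  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);
  /* Lookup now returns 0; removing an absent key is not an error.  */
}
#endif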
/* Return the immediate predecessor KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}
/* Return the immediate successor KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
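#if 0
/* Editorial sketch: a foreach callback in the same shape as
   mfsplay_tree_rebalance_helper1 above.  Returning non-zero from the
   callback stops the walk early and becomes foreach's return value.

     unsigned count = 0;
     mfsplay_tree_foreach (tree, count_node, &count);
     -- count now equals tree->num_keys.  */
static int
count_node (mfsplay_tree_node n, void *data)
{
  ++ *(unsigned *) data;
  return 0; /* keep iterating */
}
#endif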