/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"


/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
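
/* A minimal usage sketch of this API, mirroring what __mf_link_object
   and __mf_find_objects2 below actually do (live objects are keyed by
   their low address):

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low,
                          (mfsplay_tree_value) obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) ptr);
     if (n != NULL)
       obj = (__mf_object_t *) n->value;
*/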

/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant; \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
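
/* Sketch of the intended nesting, as used by the public entry points
   below (__mf_check, __mf_register, __mf_report, ...):

     LOCKTH ();
     BEGIN_RECURSION_PROTECT ();
     ... call the __mfu_* worker, which must not re-enter libmudflap ...
     END_RECURSION_PROTECT ();
     UNLOCKTH ();
*/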


/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
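
/* Assuming the __MF_CACHE_INDEX macro in mf-impl.h keeps its usual
   shape, a pointer hashes to a cache slot roughly as

     idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;

   i.e. __mf_lc_shift discards low-order (alignment) bits and
   __mf_lc_mask bounds the table size; __mf_adapt_cache below retunes
   both at runtime.  */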

struct __mf_options __mf_opts;

int __mf_starting_p = 1;
#ifndef LIBMUDFLAPTH
enum __mf_state_enum __mf_state = active;
#else
/* See __mf_state_perthread() in mf-hooks.c. */
#endif


#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
const void *threads_active_p = (void *) pthread_join;
#endif


/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif


/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];

/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);


/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}

static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };

static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (threads_active_p ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          /* Format "OPTION=N"; snprintf guarantees NUL termination,
             unlike the unterminated strncpy pair used previously.  */
          snprintf (buf, sizeof buf, "%s=N", opt->name);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }

      fprintf (stderr, "\n");
    }
}

int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
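
/* For reference, a typical option string accepted by the parser in
   __mfu_set_options below (space-separated `-OPTION', `-no-OPTION'
   and `-OPTION=N' tokens, per the grammar implemented there):

     MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=4'
*/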


int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}


#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */


/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}


/* not static */ void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}


int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}


extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}


/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad. */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad. */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_higher >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }


            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }

      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
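
/* For orientation: the compiler's -fmudflap instrumentation emits
   calls of this form around checked accesses (the exact location
   string is the compiler's choice; this one is illustrative only):

     __mf_check (&a[i], sizeof (a[i]), __MF_CHECK_READ, "foo.c:10 (main)");
     tmp = a[i];
*/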


static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}


static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      /* As reported by Herman ten Brugge, we need to scan the entire
         cache for entries that may hit this object. */
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry = & __mf_lookup_cache [0];
      unsigned i;
      for (i = 0; i <= __mf_lc_mask; i++, entry++)
        {
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}


void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}


void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */


  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}



struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}


static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}



/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
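
/* The overlap predicate used above is the standard closed-interval
   test: [a_lo,a_hi] and [b_lo,b_hi] intersect iff

     a_lo <= b_hi && a_hi >= b_lo

   so walking successors/predecessors outward from the key and
   stopping at the first non-overlapping neighbor collects the
   contiguous run of overlapping objects on each side.  */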


unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}



/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    } else {
      return 0;
    }
}

/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}


static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}

/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "      lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "       zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}

/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
1946 /* ------------------------------------------------------------------------ */
1947 /* __mf_violation */
void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking.  */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers.  */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
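/* Usage sketch (illustrative only): violation_mode is normally chosen
   through the MUDFLAP_OPTIONS environment variable of an instrumented
   program, e.g. -viol-nop, -viol-segv, -viol-abort or -viol-gdb, and
   -backtrace=N controls the depth of the stack trace printed above.
   A hypothetical session triggering a check/write violation:

       $ gcc -fmudflap bad.c -lmudflap      # bad.c writes past a buffer
       $ MUDFLAP_OPTIONS="-viol-abort -backtrace=8" ./a.out
       *******
       mudflap violation 1 (check/write): time=... ptr=... size=...
       ...
       Aborted (core dumped)
*/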
/* ------------------------------------------------------------------------ */
unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  UNLOCKTH ();
  return rc;
}
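/* Usage sketch (illustrative only): __mf_watch and __mf_unwatch are part
   of the public interface declared in mf-runtime.h, and act like data
   breakpoints -- once a region is watched, the next checked access to it
   is reported as a watch violation: */
#if 0
#include "mf-runtime.h"

int interesting;

void
watch_interesting (void)
{
  __mf_watch (&interesting, sizeof (interesting));
  /* ... any instrumented read or write of `interesting' now reaches
     __mf_violation() with type __MF_VIOL_WATCH ... */
  __mf_unwatch (&interesting, sizeof (interesting));
}
#endif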
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}
/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_state == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library.  */
    }
}
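/* Usage sketch (illustrative only): with -sigusr1-report in
   MUDFLAP_OPTIONS, an instrumented program can be asked for a live
   report without stopping it:

       $ MUDFLAP_OPTIONS="-sigusr1-report" ./a.out &
       $ kill -USR1 %1       # __mfu_report() output appears on stderr
*/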
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG
static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
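/* Worked example (illustrative): write_itoa (2, 305) stores '5' at
   buf[bufsize-2], '0' at buf[bufsize-3], then '3' at buf[bufsize-4];
   since n has reached 0 it NUL-terminates at buf[bufsize-1] and writes
   the string "305" to fd 2.  Only write() is used -- no printf-family
   calls that might allocate memory and re-enter libmudflap.  */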
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif
/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */
static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}
static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}
static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);
/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */
static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}
static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
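/* Worked example (illustrative): given the in-order node array
   k0 .. k6, mfsplay_tree_rebalance_helper2 (array, 0, 6) picks k3
   (middle = 3) as the root and recurses on [0,2] and [4,6]:

               k3
             /    \
           k1      k5
          /  \    /  \
         k0  k2  k4  k6

   so a tree of n nodes is relinked with depth about log2(n).  */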
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}
/* Insert a new node (associating KEY with VALUE) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
/* Lookup KEY in SP, returning its node if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
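/* Usage sketch (illustrative only; these routines are private to this
   file).  Because every operation first splays the requested key to the
   root, repeated queries near the same address -- the common pattern for
   pointer checks -- are served directly from the root: */
#if 0
static void
mfsplay_tree_example (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  mfsplay_tree_node n;

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000,
                       (mfsplay_tree_value) "low object");
  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x2000,
                       (mfsplay_tree_value) "high object");

  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
  /* n->value is "low object"; key 0x1000 is now at t->root.  */

  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);
  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
  /* n is now NULL.  */
}
#endif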
/* Return the immediate predecessor KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}
/* Return the immediate successor KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
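/* Usage sketch (illustrative only): a callback with the
   mfsplay_tree_foreach_fn signature; a non-zero return value from it
   would stop the walk early, exactly as the s_here state checks: */
#if 0
static int
count_nodes (mfsplay_tree_node n, void *data)
{
  unsigned *counter = (unsigned *) data;
  (*counter) ++;
  return 0; /* keep iterating */
}

/* ...
   unsigned count = 0;
   mfsplay_tree_foreach (tree, count_nodes, &count);
   ...  */
#endif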