/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5
/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)
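
/* Illustrative sketch (not itself part of the library): every public
   entry point wraps its unlocked worker in the lock and this guard,
   as __mf_check does below:

     void __mf_check (void *ptr, size_t sz, int type, const char *location)
     {
       LOCKTH ();
       BEGIN_RECURSION_PROTECT ();
       __mfu_check (ptr, sz, type, location);
       END_RECURSION_PROTECT ();
       UNLOCKTH ();
     }

   so a reentrant call (say, a hooked malloc invoked from inside the
   runtime itself) trips the `reentrant' state test above and aborts
   loudly instead of recursing indefinitely.  */
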
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
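
/* A sketch of how the cache is consulted (assuming the
   __MF_CACHE_INDEX definition from mf-impl.h, which shifts the
   address right by __mf_lc_shift and masks with __mf_lc_mask):

     unsigned idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;
     struct __mf_cache *e = & __mf_lookup_cache [idx];
     if (e->low <= (uintptr_t) ptr && CLAMPSZ (ptr, sz) <= e->high)
       ;  /* hit: this access was already judged valid */

   With the defaults (shift 2, mask 1023), consecutive 4-byte-aligned
   addresses map to consecutive cache slots.  */
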

struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#ifdef LIBMUDFLAPTH
#if defined(HAVE_TLS) && !defined(USE_EMUTLS)
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#endif
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
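
/* The cemetary rows behave as circular buffers, one per object type:
   __mf_object_dead_head[row] always names the next plot to be
   (re)used.  For instance (illustrative), with persistent_count ==
   100, burying the 101st unregistered object of a type overwrites
   plot 0, and the evicted resident becomes del_obj and is finally
   freed in __mfu_unregister below.  */
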

/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);

/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif

  /* PR41443: Beware that the above flags will be applied to
     setuid/setgid binaries, and cannot be overridden with
     $MUDFLAP_OPTIONS.  So the defaults must be non-exploitable.

     Should we consider making the default violation_mode something
     harsher than viol_nop?  OTOH, glibc's MALLOC_CHECK_ is disabled
     by default for these same programs.  */
}

static struct mudoption
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
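
/* Usage example (illustrative): a non-setuid program picks options
   from the environment, e.g.

     $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8'
     $ ./a.out

   which selects checking mode, turns violations into abort() calls,
   and keeps 8-level stack traces of each call context.  */
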

static void
__mf_usage ()
{
  struct mudoption *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2012 Free Software Foundation, Inc.\n"
           "\n"
           "Unless setuid, a program's mudflap options can be set by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }
  fprintf (stderr, "\n");
}

int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

int
__mfu_set_options (const char *optstr)
{
  struct mudoption *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
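
/* Parsing example (illustrative): given "-no-wipe-heap -backtrace=8",
   the loop above strips the "no-" prefix, matches "wipe-heap", and
   stores 0 through opts->target; it then matches "backtrace", sees
   '=', and strtol() reads 8 into the integer option.  Unrecognized
   words eventually reach the warning in the default: case and make
   __mfu_set_options return -1, which makes the caller print the
   usage text and exit.  */
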

#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
#ifdef HAVE_MMAP64
  {NULL, "mmap64", NULL},
#endif
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
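
/* Sketch of how the resolved pointers are consumed elsewhere in this
   file (DECLARE and CALL_REAL are macros from mf-impl.h; in the PIC
   case they route through the __mf_dynamic[] table filled in above):

     DECLARE (void *, malloc, size_t sz);
     ...
     void *p = CALL_REAL (malloc, 100);

   i.e. the runtime calls the dlsym(RTLD_NEXT)-resolved libc
   functions, not its own interposed wrappers.  */
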

/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}

/* not static */ void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

#if defined(__FreeBSD__) && defined(LIBMUDFLAPTH)
  pthread_self();
  LOCKTH ();
  UNLOCKTH ();
#endif /* Prime mutex which calls calloc upon first lock to avoid deadlock. */

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  if (getuid () == geteuid () && getgid () == getegid ()) /* PR41433, not setuid */
    ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics? */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
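
/* Big picture (illustrative, not code emitted by this file): a
   translation unit compiled with -fmudflap is rewritten so that,
   conceptually,

     int a[10];         =>  __mf_register (a, sizeof a, __MF_TYPE_STACK, "a");
     a[i] = 0;          =>  __mf_check (&a[i], sizeof a[i],
                                        __MF_CHECK_WRITE, "file.c:3 (fn)");
                            followed by the actual store

   (the exact location string format is the compiler's business).
   __mf_init above must therefore run, as a constructor, before any
   instrumented code executes.  */
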

int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

#if !(defined(__sun__) && defined(__svr4__))
      /* Conflicts with the automatic registration of __iob[].  */
      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
#endif

      /* Make some effort to register ctype.h static arrays.  */
#if defined(__sun__) && defined(__svr4__)
      /* __ctype[] is declared without size, but MB_CUR_MAX is the last
         member.  There seems to be no proper way to determine the size.  */
      __mf_register (__ctype, &MB_CUR_MAX - &__ctype[0] + 1, __MF_TYPE_STATIC, "__ctype");
      /* __ctype_mask points at _C_masks[1].  The size can only be determined
         using nm on libc.so.1.  */
      __mf_register (__ctype_mask - 1, 1028, __MF_TYPE_STATIC, "_C_masks");
#endif
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}

extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}

/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_higher >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgement is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
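
/* Worked example of the spanning rule above (illustrative): suppose
   two HEAP objects cover [0x1000,0x13ff] and [0x1400,0x17ff], and an
   access touches [0x13fc,0x1403].  Neither object contains the
   access entirely, but each end that sticks out of one object is
   "covered" by the next byte of the other, so uncovered stays 0 and
   judgement becomes 1.  Had either neighbour been STACK or STATIC,
   it would be filtered out and the access rejected.  */
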

static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}

static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry;
      unsigned i;
      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
        {
          /* For large objects (>= cache size - 1) check the whole cache.  */
          entry = & __mf_lookup_cache [0];
          for (i = 0; i <= __mf_lc_mask; i++, entry++)
            {
              /* NB: the "||" in the following test permits this code to
                 tolerate the situation introduced by __mf_check over
                 contiguous objects, where a cache entry spans several
                 objects.  */
              if (entry->low == low || entry->high == high)
                {
                  entry->low = MAXPTR;
                  entry->high = MINPTR;
                }
            }
        }
      else
        {
          /* Object is now smaller than cache size.  */
          unsigned entry_low_idx = __MF_CACHE_INDEX (low);
          unsigned entry_high_idx = __MF_CACHE_INDEX (high);
          if (entry_low_idx <= entry_high_idx)
            {
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
          else
            {
              /* Object wrapped around the end of the cache.  First search
                 from low to end of cache and then from 0 to high.  */
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
              entry = & __mf_lookup_cache [0];
              for (i = 0; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
        }
    }
}

void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected.  */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}

void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries.  */
#if 0
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
#endif
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}

struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}


static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
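
/* Numeric sketch (illustrative): if the liveness-weighted census
   finds that address bit 4 is the first good discriminant (the first
   bit whose weighted 0/1 product reaches 70% of the maximum),
   smoothed_new_shift drifts toward 4 at 10% per adaptation
   (0.9*old + 0.1*new), so a shift change only sticks after the same
   answer recurs over several __mf_adapt_cache rounds.  The mask, by
   contrast, is currently pinned at 0xffff by the two lines above the
   VERBOSE_TRACE.  */
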

/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}


unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
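
/* The overlap test used above, in isolation (illustrative): the
   closed intervals [obj->low, obj->high] and [ptr_low, ptr_high]
   intersect iff

     obj->low <= ptr_high && obj->high >= ptr_low

   e.g. an object [0x1000,0x10ff] overlaps a query [0x10f0,0x1102]
   but not [0x1100,0x1102].  */
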

/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    } else {
      return 0;
    }
}
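
/* Search-widening example (illustrative): recollection takes the
   values 0, 1, 3, 7, 15, ... (recollection*2 + 1), so successive
   passes examine exponentially longer recent suffixes of each
   cemetary row until an overlapping dead object turns up or the
   whole history (persistent_count plots) has been scanned.  */
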

/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}

static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}

/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "      lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "      zombie objects: %u\n", dead_count);
        }
    }
  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                        pc_array[n] = __builtin_return_address(n); \
                        if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-( */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries.  */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
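
/* A minimal usage sketch (mirroring the call made from __mf_violation
   below, not new functionality): the caller receives a malloc()'d
   vector of strings and must release it with the *real* free(), since
   the wrapped free() would itself be checked:

     char **symbols;
     size_t i, n;
     n = __mf_backtrace (&symbols, NULL, 2);
     for (i = 0; i < n; i++)
       fprintf (stderr, "  %s\n", symbols[i]);
     CALL_REAL (free, symbols);
*/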
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking.  */
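
      /* For example (a sketch of the behavior below, not normative
         output): with sz == 8, each retry widens the region by
         sz*tries*tries bytes on each side -- 8, then 32 more, then 72
         more, and so on -- for at most 16 tries, so a stray pointer
         landing near a real object is usually found within a few
         rounds.  */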
      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers.  */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }
  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
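
/* A rough sketch of the fork()-based alternative suggested in the XXX
   note above -- untested, with GDB_WAIT_PARAMETER standing in for some
   configurable delay; a real version would also have to remember the
   child's pid so that a still-running gdb is not spawned twice:

     pid_t child = fork ();
     if (child == 0)
       {
         char pidbuf[32];
         snprintf (pidbuf, sizeof pidbuf, "--pid=%u", (unsigned) getppid ());
         execlp ("gdb", "gdb", pidbuf, (char *) NULL);
         _exit (127);                 // exec failed
       }
     else if (child > 0)
       sleep (GDB_WAIT_PARAMETER);    // give gdb time to attach, then resume
*/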
/* ------------------------------------------------------------------------ */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  UNLOCKTH ();
  return rc;
}
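
/* A minimal usage sketch (hypothetical application code, not part of
   this file): watching a registered object makes the next checked
   access to it report a __MF_VIOL_WATCH violation instead of passing
   silently:

     int *p = malloc (16 * sizeof (int));
     __mf_watch (p, 16 * sizeof (int));    // arm the watch
     p[0] = 42;                            // instrumented access: watch fires
     __mf_unwatch (p, 16 * sizeof (int));  // disarm again
*/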
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove the SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable the signal; this
                                might be a SysV signal library.  */
    }
}
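
/* Usage note (a behavioral summary, not new code): with the
   sigusr1_report option enabled, sending the process a SIGUSR1 -- e.g.
   "kill -USR1 <pid>" from a shell -- makes a subsequent libmudflap
   entry point call this responder, which emits a full __mfu_report()
   dump to stderr without stopping the program.  */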
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
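
/* Worked example (illustrative only): for n == 305 with a 4-byte int,
   bufsize is 16; the loop deposits '5', '0', '3' at buf[14], buf[13],
   buf[12], NUL-terminates at buf[15], and writes the string starting
   at buf[12] -- i.e. "305".  The digits are produced right-to-left so
   no heap allocation (and hence no mudflap recursion) is needed.  */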
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif
/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}


static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);
/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
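
/* For orientation (a sketch added here, not from the original
   sources): the three rewiring cases above are the classic splay
   rotations.  E.g. the "zig-zig" case, where N and *PARENT are both
   left children, transforms

          G                N
         / \              / \
        P   d    ==>     a   P
       / \                  / \
      N   c                b   G
     / \                      / \
    a   b                    c   d

   and the mixed ("zig-zag") cases hoist N between *PARENT and
   *GRANDPARENT.  Applying these pairwise steps along the access path
   is what keeps the amortized cost of each splay logarithmic in the
   number of keys.  */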
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}


static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
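
/* Worked example (illustrative): given seven nodes whose keys sit in
   sorted order in array[0..6], the rebuild picks array[3] as the root,
   array[1] and array[5] as its children, and the remaining elements as
   leaves -- a tree of depth 3 instead of the depth-7 chain that a
   degenerate insertion order can produce.  */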
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }


  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
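
/* Invariant worth noting (a restatement of the code above, not an
   addition to it): after mfsplay_tree_splay() returns on a non-empty
   tree, sp->root holds either KEY itself or KEY's in-order predecessor
   or successor.  The lookup, insert, remove, predecessor, and
   successor routines below all rely on exactly this property.  */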
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}
/* Insert a new node (associating KEY with DATA) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
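
/* A minimal usage sketch (hypothetical keys and values, mirroring how
   the object database elsewhere in this file drives the tree):

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, some_object);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
     if (n != NULL)
       use (n->value);    // 'use' and 'some_object' are placeholders
     mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);
*/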
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
/* Lookup KEY in SP, returning its node if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
/* Return the immediate predecessor KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}
/* Return the immediate successor KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
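
/* Why st->num_keys entries suffice for the explicit stacks above (a
   brief justification, not in the original comments): the stack holds
   at most one frame per node on the current root-to-node path, and
   such a path visits each of the tree's num_keys nodes at most once,
   so even a fully degenerate, chain-shaped tree fits.  */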