/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
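
/* Usage sketch for the API above (illustrative only): each live object
   is keyed on its low address, as __mf_link_object / __mf_unlink_object
   do further below.

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low,
                          (mfsplay_tree_value) obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) addr);
     if (n != NULL)   /* exact base-address match */
       use ((__mf_object_t *) n->value);
     mfsplay_tree_remove (t, (mfsplay_tree_key) obj->low);
*/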

/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))

/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant; \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
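
/* Usage sketch: every locking entry point pairs these with the lock
   macros around an unlocked `__mfu_'-prefixed worker, as __mf_check
   et al. do below:

     LOCKTH ();
     BEGIN_RECURSION_PROTECT ();
     __mfu_check (ptr, sz, type, location);
     END_RECURSION_PROTECT ();
     UNLOCKTH ();
*/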

/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
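
/* Sketch of the cache's fast path: before calling __mf_check (), each
   instrumented access first probes a single slot, approximately like
   this (hypothetical inline; __MF_CACHE_INDEX comes from the mudflap
   headers):

     struct __mf_cache *e = & __mf_lookup_cache [__MF_CACHE_INDEX (ptr)];
     if (UNLIKELY (e->low > (uintptr_t) ptr
                   || e->high < (uintptr_t) ptr + sz - 1))
       __mf_check (ptr, sz, type, location);   // miss: take the slow path

   A hit means the access lies inside a recently validated object.  */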

struct __mf_options __mf_opts;

int __mf_starting_p = 1;
#ifndef LIBMUDFLAPTH
enum __mf_state_enum __mf_state = active;
#else
/* See __mf_state_perthread() in mf-hooks.c. */
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
const void *threads_active_p = (void *) pthread_join;
#endif

/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif

/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];

/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);

/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}

static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  int value;
  int *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
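
/* Example: the table above maps directly onto MUDFLAP_OPTIONS tokens,
   e.g. (hypothetical invocation):

     $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-timestamps'
     $ ./a.out

   which selects mode_check and viol_abort, sets __mf_opts.backtrace
   to 8, and clears __mf_opts.timestamps.  */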

static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (threads_active_p ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
            );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }

      fprintf (stderr, "\n");
    }
}

int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
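
/* Hypothetical self-test sketch for the parser above (never built):
   a "no-" prefix negates a set_option; an "=N" suffix feeds a
   read_integer_option.  */
#if 0
static void
__mf_set_options_example (void)
{
  __mfu_set_options ("-no-print-leaks -crumple-zone=64");
  assert (__mf_opts.print_leaks == 0);
  assert (__mf_opts.crumple_zone == 64);
}
#endif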

#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
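
/* Note on the table above: the DECLARE/CALL_REAL macros (mf-impl.h)
   route libc calls through __mf_dynamic so the runtime's own
   allocations bypass the mudflap wrappers.  The pattern, as used
   throughout this file:

     DECLARE (void *, malloc, size_t c);       // name the real symbol
     void *p = CALL_REAL (malloc, 100);        // libc malloc, not the hook

   In the PIC case the underlying pointer is the dlsym (RTLD_NEXT, ...)
   result resolved above.  */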

/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}

/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
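
/* Example of the public API set up above: an application can register
   and check a region by hand.  (Hypothetical sketch; the compiler
   normally emits these calls itself.)  */
#if 0
static void
__mf_manual_region_example (void)
{
  static char buf [100];
  __mf_register (buf, sizeof (buf), __MF_TYPE_GUESS, "manual buf");
  __mf_check (buf + 10, 4, __MF_CHECK_READ, "example read"); /* in bounds: ok */
  __mf_unregister (buf, sizeof (buf), __MF_TYPE_GUESS);
}
#endif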

int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin,  sizeof (*stdin),  __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}

extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}

/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans one or more valid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }

                  /* XXX: Access runs off left or right side of this
                     object.  That could be okay, if there are
                     other objects that fill in all the holes. */
                }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}

static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}

static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      unsigned idx_low = __MF_CACHE_INDEX (low);
      unsigned idx_high = __MF_CACHE_INDEX (high);
      unsigned i;
      for (i = idx_low; i <= idx_high; i++)
        {
          struct __mf_cache *entry = & __mf_lookup_cache [i];
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}

void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here.  */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
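
/* Example of the duplicate rule above: two compilation units may both
   register the same global, e.g. (hypothetically)

     __mf_register (&errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
     __mf_register (&errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

   The second call matches low/high/type exactly and is traced as a
   harmless duplicate; any other same-type overlap raises
   __MF_VIOL_REGISTER.  */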

void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0 &&
            old_obj->type >= 0 &&
            old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}

struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}

static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0x3ff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
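
/* Worked example for the shift heuristic above (hypothetical numbers):
   if live heap objects are mostly 32-byte aligned, bits 0-4 of .low
   are nearly constant, so one of the two weighted_address_bits[i][*]
   columns stays near zero and their product is tiny; the first bit
   index whose product reaches 70% of the maximum (say i == 5) becomes
   the smoothing target, making neighboring cache slots cover 32-byte
   strides.  */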

/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}

unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}

/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    } else {
      return 0;
    }
}
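
/* Note on the loop above: `recollection' grows as 0, 1, 3, 7, 15, ...
   so each pass inspects roughly twice as many of the most recently
   buried objects per row, until something overlaps or the whole
   cemetery (persistent_count plots) has been examined.  */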

/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}

static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}

/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}

/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
    pc_array[n] = __builtin_return_address(n); \
    if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}

/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }
2026 /* How to finally handle this violation? */
2027 switch (__mf_opts.violation_mode)
2028 {
2029 case viol_nop:
2030 break;
2031 case viol_segv:
2032 kill (getpid(), SIGSEGV);
2033 break;
2034 case viol_abort:
2035 abort ();
2036 break;
2037 case viol_gdb:
2039 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2040 system (buf);
2041 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2042 instead, and let the forked child execlp() gdb. That way, this
2043 subject process can be resumed under the supervision of gdb.
2044 This can't happen now, since system() only returns when gdb
2045 dies. In that case, we need to beware of starting a second
2046 concurrent gdb child upon the next violation. (But if the first
2047 gdb dies, then starting a new one is appropriate.) */
2048 break;
2049 }
2050 }
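/* Illustrative note (not from this file): __mf_opts.violation_mode is
   normally selected at startup through the MUDFLAP_OPTIONS environment
   variable; e.g., assuming the documented option spellings,

     MUDFLAP_OPTIONS="-viol-gdb -backtrace=8" ./a.out

   would make the switch above spawn an attached gdb on the first
   violation. */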
2052 /* ------------------------------------------------------------------------ */
2055 unsigned __mf_watch (void *ptr, size_t sz)
2056 {
2057 unsigned rc;
2058 LOCKTH ();
2059 BEGIN_RECURSION_PROTECT ();
2060 rc = __mf_watch_or_not (ptr, sz, 1);
2061 END_RECURSION_PROTECT ();
2062 UNLOCKTH ();
2063 return rc;
2064 }
2066 unsigned __mf_unwatch (void *ptr, size_t sz)
2067 {
2068 unsigned rc;
2069 LOCKTH ();
2070 rc = __mf_watch_or_not (ptr, sz, 0);
2071 UNLOCKTH ();
2072 return rc;
2073 }
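/* Illustrative usage sketch (assumed client-side code, not part of
   this file): a program built with -fmudflap can ask for
   watchpoint-style reporting on any registered object:

     int hot;
     __mf_watch (&hot, sizeof hot);     - every access now raises a
                                          __MF_VIOL_WATCH report
     ...
     __mf_unwatch (&hot, sizeof hot);   - back to ordinary checking
*/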
2076 static unsigned
2077 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2078 {
2079 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2080 uintptr_t ptr_low = (uintptr_t) ptr;
2081 unsigned count = 0;
2083 TRACE ("%s ptr=%p size=%lu\n",
2084 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2086 switch (__mf_opts.mudflap_mode)
2087 {
2088 case mode_nop:
2089 case mode_populate:
2090 case mode_violate:
2091 count = 0;
2092 break;
2094 case mode_check:
2095 {
2096 __mf_object_t **all_ovr_objs;
2097 unsigned obj_count;
2098 unsigned n;
2099 DECLARE (void *, malloc, size_t c);
2100 DECLARE (void, free, void *p);
2102 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2103 VERBOSE_TRACE (" %u:", obj_count);
2105 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2106 if (all_ovr_objs == NULL) abort ();
2107 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2108 assert (n == obj_count);
2110 for (n = 0; n < obj_count; n ++)
2111 {
2112 __mf_object_t *obj = all_ovr_objs[n];
2114 VERBOSE_TRACE (" [%p]", (void *) obj);
2115 if (obj->watching_p != flag)
2116 {
2117 obj->watching_p = flag;
2118 count ++;
2120 /* Remove object from cache, to ensure next access
2121 goes through __mf_check(). */
2122 if (flag)
2123 __mf_uncache_object (obj);
2124 }
2125 }
2126 CALL_REAL (free, all_ovr_objs);
2128 break;
2129 }
2130 }
2131 return count;
2132 }
2135 void
2136 __mf_sigusr1_handler (int num)
2137 {
2138 __mf_sigusr1_received ++;
2139 }
2141 /* Install or remove SIGUSR1 handler as necessary.
2142 Also, respond to a received pending SIGUSR1. */
2143 void
2144 __mf_sigusr1_respond ()
2145 {
2146 static int handler_installed;
2148 #ifdef SIGUSR1
2149 /* Manage handler */
2150 if (__mf_opts.sigusr1_report && ! handler_installed)
2151 {
2152 signal (SIGUSR1, __mf_sigusr1_handler);
2153 handler_installed = 1;
2154 }
2155 else if (! __mf_opts.sigusr1_report && handler_installed)
2156 {
2157 signal (SIGUSR1, SIG_DFL);
2158 handler_installed = 0;
2159 }
2160 #endif
2162 /* Manage enqueued signals */
2163 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2164 {
2165 __mf_sigusr1_handled ++;
2166 assert (__mf_state == reentrant);
2167 __mfu_report ();
2168 handler_installed = 0; /* We may need to re-install the handler: SysV-style signal () deregisters it after each delivery. */
2169 }
2170 }
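/* Illustrative note (option spelling per the libmudflap docs, assumed
   here): with MUDFLAP_OPTIONS containing -sigusr1-report, an operator
   can request a report from a live process, e.g.

     kill -USR1 <pid>

   and the report is emitted at the next entry into the runtime via
   __mfu_report (); the handler itself only bumps a counter, since
   little else is async-signal-safe. */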
2173 /* XXX: provide an alternative __assert_fail function that cannot
2174 fail due to libmudflap infinite recursion. */
2175 #ifndef NDEBUG
2177 static void
2178 write_itoa (int fd, unsigned n)
2179 {
2180 enum x { bufsize = sizeof(n)*4 };
2181 char buf [bufsize];
2182 unsigned i;
2184 for (i=0; i<bufsize-1; i++)
2185 {
2186 unsigned digit = n % 10;
2187 buf[bufsize-2-i] = digit + '0';
2188 n /= 10;
2189 if (n == 0)
2190 {
2191 char *m = & buf [bufsize-2-i];
2192 buf[bufsize-1] = '\0';
2193 write (fd, m, strlen(m));
2194 break;
2195 }
2196 }
2197 }
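/* Illustrative: write_itoa (2, 1307) emits the bytes "1307" to stderr
   with a single write (), filling the buffer from its tail.  Plain
   fprintf is avoided on this path because stdio may call the wrapped
   malloc and recurse back into libmudflap (an inference from the XXX
   comment above, not a statement from the original sources). */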
2200 void
2201 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2202 {
2203 #define write2(string) write (2, (string), strlen ((string)));
2204 write2("mf");
2205 #ifdef LIBMUDFLAPTH
2206 write2("(");
2207 write_itoa (2, (unsigned) pthread_self ());
2208 write2(")");
2209 #endif
2210 write2(": assertion failure: `");
2211 write (2, msg, strlen (msg));
2212 write2("' in ");
2213 write (2, func, strlen (func));
2214 write2(" at ");
2215 write (2, file, strlen (file));
2216 write2(":");
2217 write_itoa (2, line);
2218 write2("\n");
2219 #undef write2
2220 abort ();
2221 }
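/* Illustrative: given the write2 () calls above, a failure prints e.g.

     mf(1042): assertion failure: `n == obj_count' in __mf_watch_or_not at mf-runtime.c:2108

   where the "(1042)" thread id appears only under LIBMUDFLAPTH.  (The
   example values are made up; the message shape follows this function.) */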
2224 #endif
2228 /* Adapted splay tree code, originally from libiberty. It has been
2229 specialized for libmudflap as requested by RMS. */
2231 static void
2232 mfsplay_tree_free (void *p)
2233 {
2234 DECLARE (void, free, void *p);
2235 CALL_REAL (free, p);
2236 }
2238 static void *
2239 mfsplay_tree_xmalloc (size_t s)
2240 {
2241 DECLARE (void *, malloc, size_t s);
2242 return CALL_REAL (malloc, s);
2243 }
2246 static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2247 static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2248 mfsplay_tree_key,
2249 mfsplay_tree_node *,
2250 mfsplay_tree_node *,
2251 mfsplay_tree_node *);
2254 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2255 and grandparent, respectively, of NODE. */
2257 static mfsplay_tree_node
2258 mfsplay_tree_splay_helper (mfsplay_tree sp,
2259 mfsplay_tree_key key,
2260 mfsplay_tree_node * node,
2261 mfsplay_tree_node * parent,
2262 mfsplay_tree_node * grandparent)
2263 {
2264 mfsplay_tree_node *next;
2265 mfsplay_tree_node n;
2266 int comparison;
2268 n = *node;
2270 if (!n)
2271 return *parent;
2273 comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));
2275 if (comparison == 0)
2276 /* We've found the target. */
2277 next = 0;
2278 else if (comparison < 0)
2279 /* The target is to the left. */
2280 next = &n->left;
2281 else
2282 /* The target is to the right. */
2283 next = &n->right;
2285 if (next)
2286 {
2287 /* Check whether our recursion depth is too high. Abort this search,
2288 and signal that a rebalance is required to continue. */
2289 if (sp->depth > sp->max_depth)
2290 {
2291 sp->rebalance_p = 1;
2292 return n;
2293 }
2295 /* Continue down the tree. */
2296 sp->depth ++;
2297 n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2298 sp->depth --;
2300 /* The recursive call will change the place to which NODE
2301 points. */
2302 if (*node != n || sp->rebalance_p)
2303 return n;
2304 }
2306 if (!parent)
2307 /* NODE is the root. We are done. */
2308 return n;
2310 /* First, handle the case where there is no grandparent (i.e.,
2311 *PARENT is the root of the tree.) */
2312 if (!grandparent)
2313 {
2314 if (n == (*parent)->left)
2315 {
2316 *node = n->right;
2317 n->right = *parent;
2318 }
2319 else
2320 {
2321 *node = n->left;
2322 n->left = *parent;
2323 }
2324 *parent = n;
2325 return n;
2326 }
2328 /* Next handle the cases where both N and *PARENT are left children,
2329 or where both are right children. */
2330 if (n == (*parent)->left && *parent == (*grandparent)->left)
2331 {
2332 mfsplay_tree_node p = *parent;
2334 (*grandparent)->left = p->right;
2335 p->right = *grandparent;
2336 p->left = n->right;
2337 n->right = p;
2338 *grandparent = n;
2339 return n;
2340 }
2341 else if (n == (*parent)->right && *parent == (*grandparent)->right)
2342 {
2343 mfsplay_tree_node p = *parent;
2345 (*grandparent)->right = p->left;
2346 p->left = *grandparent;
2347 p->right = n->left;
2348 n->left = p;
2349 *grandparent = n;
2350 return n;
2351 }
2353 /* Finally, deal with the case where N is a left child, but *PARENT
2354 is a right child, or vice versa. */
2355 if (n == (*parent)->left)
2356 {
2357 (*parent)->left = n->right;
2358 n->right = *parent;
2359 (*grandparent)->right = n->left;
2360 n->left = *grandparent;
2361 *grandparent = n;
2362 return n;
2363 }
2364 else
2365 {
2366 (*parent)->right = n->left;
2367 n->left = *parent;
2368 (*grandparent)->left = n->right;
2369 n->right = *grandparent;
2370 *grandparent = n;
2371 return n;
2372 }
2373 }
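/* For exposition (diagram added, not in the original): the left-left
   "zig-zig" case above rotates

          g                n
         / \              / \
        p   D            A   p
       / \        ==>       / \
      n   C                B   g
     / \                      / \
    A   B                    C   D

   which matches the pointer assignments: g->left = p->right (C),
   p->right = g, p->left = n->right (B), n->right = p. */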
2377 static int
2378 mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2379 {
2380 mfsplay_tree_node **p = array_ptr;
2381 *(*p) = n;
2382 (*p)++;
2383 return 0;
2384 }
2387 static mfsplay_tree_node
2388 mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2389 unsigned high)
2390 {
2391 unsigned middle = low + (high - low) / 2;
2392 mfsplay_tree_node n = array[middle];
2394 /* Note that since we're producing a balanced binary tree, it is not a problem
2395 that this function is recursive. */
2396 if (low + 1 <= middle)
2397 n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2398 else
2399 n->left = NULL;
2401 if (middle + 1 <= high)
2402 n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2403 else
2404 n->right = NULL;
2406 return n;
2407 }
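/* Worked example (for exposition): with seven nodes, indices 0..6,
   the midpoint choices above yield root 3, children 1 and 5, and
   leaves 0, 2, 4, 6 - a tree of depth 2 rebuilt in O(n) time. */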
2410 /* Rebalance the entire tree. Do this by copying all the node
2411 pointers into an array, then cleverly re-linking them. */
2412 static void
2413 mfsplay_tree_rebalance (mfsplay_tree sp)
2414 {
2415 mfsplay_tree_node *all_nodes, *all_nodes_1;
2417 if (sp->num_keys <= 2)
2418 return;
2420 all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2422 /* Traverse all nodes to copy their addresses into this array. */
2423 all_nodes_1 = all_nodes;
2424 mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2425 (void *) &all_nodes_1);
2427 /* Relink all the nodes. */
2428 sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2430 mfsplay_tree_free (all_nodes);
2431 }
2434 /* Splay SP around KEY. */
2435 static void
2436 mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
2437 {
2438 if (sp->root == 0)
2439 return;
2441 /* If we just splayed the tree with the same key, do nothing. */
2442 if (sp->last_splayed_key_p &&
2443 (sp->last_splayed_key == key))
2444 return;
2446 /* Limit the recursion depth while splaying. The idea is to bound
2447 excessive stack usage if we're facing degenerate access patterns.
2448 Unfortunately such patterns can occur e.g. during static
2449 initialization, where many static objects might be registered in
2450 increasing address sequence, or when large tree-like heap data
2451 structures are allocated quickly.
2453 On x86, 2500 frames at roughly 80 bytes each come to about 200K of stack usage.
2454 XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
2455 sp->max_depth = 2500;
2456 sp->rebalance_p = sp->depth = 0;
2458 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2459 if (sp->rebalance_p)
2460 {
2461 mfsplay_tree_rebalance (sp);
2463 sp->rebalance_p = sp->depth = 0;
2464 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2466 if (sp->rebalance_p)
2467 abort ();
2468 }
2471 /* Cache this splay key. */
2472 sp->last_splayed_key = key;
2473 sp->last_splayed_key_p = 1;
2474 }
2478 /* Allocate a new splay tree. */
2479 static mfsplay_tree
2480 mfsplay_tree_new ()
2481 {
2482 mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2483 sp->root = NULL;
2484 sp->last_splayed_key_p = 0;
2485 sp->num_keys = 0;
2487 return sp;
2488 }
2492 /* Insert a new node (associating KEY with DATA) into SP. If a
2493 previous node with the indicated KEY exists, its data is replaced
2494 with the new value. Returns the new node. */
2495 static mfsplay_tree_node
2496 mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2497 {
2498 int comparison = 0;
2500 mfsplay_tree_splay (sp, key);
2502 if (sp->root)
2503 comparison = ((sp->root->key > key) ? 1 :
2504 ((sp->root->key < key) ? -1 : 0));
2506 if (sp->root && comparison == 0)
2507 {
2508 /* If the root of the tree already has the indicated KEY, just
2509 replace the value with VALUE. */
2510 sp->root->value = value;
2511 }
2512 else
2513 {
2514 /* Create a new node, and insert it at the root. */
2515 mfsplay_tree_node node;
2517 node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2518 node->key = key;
2519 node->value = value;
2520 sp->num_keys++;
2521 if (!sp->root)
2522 node->left = node->right = 0;
2523 else if (comparison < 0)
2524 {
2525 node->left = sp->root;
2526 node->right = node->left->right;
2527 node->left->right = 0;
2528 }
2529 else
2530 {
2531 node->right = sp->root;
2532 node->left = node->right->left;
2533 node->right->left = 0;
2534 }
2536 sp->root = node;
2537 sp->last_splayed_key_p = 0;
2538 }
2540 return sp->root;
2541 }
2543 /* Remove KEY from SP. It is not an error if it did not exist. */
2545 static void
2546 mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2547 {
2548 mfsplay_tree_splay (sp, key);
2549 sp->last_splayed_key_p = 0;
2550 if (sp->root && (sp->root->key == key))
2551 {
2552 mfsplay_tree_node left, right;
2553 left = sp->root->left;
2554 right = sp->root->right;
2555 /* Delete the root node itself. */
2556 mfsplay_tree_free (sp->root);
2557 sp->num_keys--;
2558 /* One of the children is now the root. Doesn't matter much
2559 which, so long as we preserve the properties of the tree. */
2560 if (left)
2561 {
2562 sp->root = left;
2563 /* If there was a right child as well, hang it off the
2564 right-most leaf of the left child. */
2565 if (right)
2566 {
2567 while (left->right)
2568 left = left->right;
2569 left->right = right;
2570 }
2571 }
2572 else
2573 sp->root = right;
2574 }
2575 }
2577 /* Look up KEY in SP, returning its node if present, and NULL
2578 otherwise. */
2580 static mfsplay_tree_node
2581 mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2582 {
2583 mfsplay_tree_splay (sp, key);
2584 if (sp->root && (sp->root->key == key))
2585 return sp->root;
2586 else
2587 return 0;
2588 }
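/* Illustrative usage sketch (hypothetical helper added for exposition;
   kept compiled-out since nothing in this file calls it): */
#if 0
static void
mfsplay_tree_example (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  mfsplay_tree_node n;
  int datum = 42;

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, (mfsplay_tree_value) &datum);
  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);   /* n->value == &datum */
  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);       /* now lookup returns 0 */
}
#endif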
2591 /* Return the node containing the immediate predecessor of KEY, or NULL
2592 if there is none. KEY need not be present in the tree. */
2594 static mfsplay_tree_node
2595 mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2596 {
2597 int comparison;
2598 mfsplay_tree_node node;
2599 /* If the tree is empty, there is certainly no predecessor. */
2600 if (!sp->root)
2601 return NULL;
2602 /* Splay the tree around KEY. That will leave either the KEY
2603 itself, its predecessor, or its successor at the root. */
2604 mfsplay_tree_splay (sp, key);
2605 comparison = ((sp->root->key > key) ? 1 :
2606 ((sp->root->key < key) ? -1 : 0));
2608 /* If the predecessor is at the root, just return it. */
2609 if (comparison < 0)
2610 return sp->root;
2611 /* Otherwise, find the rightmost element of the left subtree. */
2612 node = sp->root->left;
2613 if (node)
2614 while (node->right)
2615 node = node->right;
2616 return node;
2617 }
2619 /* Return the node containing the immediate successor of KEY, or NULL
2620 if there is none. KEY need not be present in the tree. */
2622 static mfsplay_tree_node
2623 mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2624 {
2625 int comparison;
2626 mfsplay_tree_node node;
2627 /* If the tree is empty, there is certainly no successor. */
2628 if (!sp->root)
2629 return NULL;
2630 /* Splay the tree around KEY. That will leave either the KEY
2631 itself, its predecessor, or its successor at the root. */
2632 mfsplay_tree_splay (sp, key);
2633 comparison = ((sp->root->key > key) ? 1 :
2634 ((sp->root->key < key) ? -1 : 0));
2635 /* If the successor is at the root, just return it. */
2636 if (comparison > 0)
2637 return sp->root;
2638 /* Otherwise, find the leftmost element of the right subtree. */
2639 node = sp->root->right;
2640 if (node)
2641 while (node->left)
2642 node = node->left;
2643 return node;
2644 }
2646 /* Call FN, passing it the DATA, for every node in SP, following an
2647 in-order traversal. If FN ever returns a non-zero value, the
2648 iteration ceases immediately, and the value is returned.
2649 Otherwise, this function returns 0.
2651 This function simulates recursion using dynamically allocated
2652 arrays, since it may be called from mfsplay_tree_rebalance(), which
2653 in turn means that the tree is already uncomfortably deep for stack
2654 space limits. */
2655 static int
2656 mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
2657 {
2658 mfsplay_tree_node *stack1;
2659 char *stack2;
2660 unsigned sp;
2661 int val = 0;
2662 enum s { s_left, s_here, s_right, s_up };
2664 if (st->root == NULL) /* => num_keys == 0 */
2665 return 0;
2667 stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
2668 stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);
2670 sp = 0;
2671 stack1 [sp] = st->root;
2672 stack2 [sp] = s_left;
2674 while (1)
2675 {
2676 mfsplay_tree_node n;
2677 enum s s;
2679 n = stack1 [sp];
2680 s = stack2 [sp];
2682 /* Handle each of the four possible states separately. */
2684 /* 1: We're here to traverse the left subtree (if any). */
2685 if (s == s_left)
2686 {
2687 stack2 [sp] = s_here;
2688 if (n->left != NULL)
2689 {
2690 sp ++;
2691 stack1 [sp] = n->left;
2692 stack2 [sp] = s_left;
2693 }
2694 }
2696 /* 2: We're here to traverse this node. */
2697 else if (s == s_here)
2698 {
2699 stack2 [sp] = s_right;
2700 val = (*fn) (n, data);
2701 if (val) break;
2702 }
2704 /* 3: We're here to traverse the right subtree (if any). */
2705 else if (s == s_right)
2706 {
2707 stack2 [sp] = s_up;
2708 if (n->right != NULL)
2709 {
2710 sp ++;
2711 stack1 [sp] = n->right;
2712 stack2 [sp] = s_left;
2713 }
2714 }
2716 /* 4: We're here after both subtrees (if any) have been traversed. */
2717 else if (s == s_up)
2718 {
2719 /* Pop the stack. */
2720 if (sp == 0) break; /* Popping off the root node: we're finished! */
2721 sp --;
2722 }
2724 else
2725 abort ();
2726 }
2728 mfsplay_tree_free (stack1);
2729 mfsplay_tree_free (stack2);
2730 return val;
2731 }
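/* Illustrative callback sketch (hypothetical, for exposition): count
   every node using the iterator above. */
#if 0
static int
mfsplay_tree_count_helper (mfsplay_tree_node n, void *data)
{
  ++ *(unsigned *) data;
  return 0;                     /* zero means: keep iterating */
}

/* usage:  unsigned count = 0;
           mfsplay_tree_foreach (t, mfsplay_tree_count_helper, &count);  */
#endif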