/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
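
/* Illustrative sketch (not part of the original source): how the
   mfsplay_tree API above is used by __mf_link_object / __mf_unlink_object
   further down.  Keys are object base addresses (.low); values are
   __mf_object_t pointers; `obj' and `addr' here are hypothetical.

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low,
                          (mfsplay_tree_value) obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) addr);
     if (n != NULL)
       obj = (__mf_object_t *) n->value;
     mfsplay_tree_remove (t, (mfsplay_tree_key) obj->low);  */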
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant;  \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
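
/* Illustrative note (not part of the original source): every public
   entry point below brackets its unlocked `__mfu_' worker with these
   macros, e.g. __mf_check wrapping __mfu_check:

     LOCKTH ();
     BEGIN_RECURSION_PROTECT ();
     __mfu_check (ptr, sz, type, location);
     END_RECURSION_PROTECT ();
     UNLOCKTH ();  */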
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
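
/* Illustrative sketch (not part of the original source): the lookup
   cache is indexed by hashing a pointer with the shift/mask pair via
   the __MF_CACHE_INDEX macro from mf-impl.h (assumed to be roughly the
   following); a hit means the access lies inside a known valid object:

     idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;
     hit = (__mf_lookup_cache[idx].low <= (uintptr_t) ptr
            && (uintptr_t) ptr + sz - 1 <= __mf_lookup_cache[idx].high);  */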
struct __mf_options __mf_opts;

int __mf_starting_p = 1;
#ifndef LIBMUDFLAPTH
enum __mf_state_enum __mf_state = active;
#else
/* See __mf_state_perthread() in mf-hooks.c. */
#endif


#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
const void *threads_active_p = (void *) pthread_join;
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
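
/* Illustrative note (not part of the original source): each cemetery
   row is a fixed-size ring.  __mfu_unregister below advances the head
   like this, overwriting (and later freeing) the oldest resident once
   __mf_opts.persistent_count slots have filled:

     plot = __mf_object_dead_head [row];
     del_obj = __mf_object_cemetary [row][plot];   (the evictee, may be NULL)
     __mf_object_cemetary [row][plot] = old_obj;
     plot ++;
     if (plot == __mf_opts.persistent_count) plot = 0;
     __mf_object_dead_head [row] = plot;  */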
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
281 /* Configuration engine */
283 static void
284 __mf_set_default_options ()
286 memset (& __mf_opts, 0, sizeof (__mf_opts));
288 __mf_opts.adapt_cache = 1000003;
289 __mf_opts.abbreviate = 1;
290 __mf_opts.verbose_violations = 1;
291 __mf_opts.free_queue_length = 4;
292 __mf_opts.persistent_count = 100;
293 __mf_opts.crumple_zone = 32;
294 __mf_opts.backtrace = 4;
295 __mf_opts.timestamps = 1;
296 __mf_opts.mudflap_mode = mode_check;
297 __mf_opts.violation_mode = viol_nop;
298 __mf_opts.heur_std_data = 1;
299 #ifdef LIBMUDFLAPTH
300 __mf_opts.thread_stack = 0;
301 #endif
static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (threads_active_p ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
            );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}
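
/* Example (not part of the original source): a typical invocation of a
   mudflap-instrumented binary, using option names from the table above:

     $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-abbreviate'
     $ ./a.out
*/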
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}


/* not static */ void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done.  */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch.  */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
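
/* Illustrative sketch (not part of the original source): how manually
   maintained code can use the public entry points, with type codes from
   mf-runtime.h; `buf' here is hypothetical.

     char *buf = malloc (16);
     __mf_register (buf, 16, __MF_TYPE_HEAP, "example buffer");
     __mf_check (buf + 4, 1, __MF_CHECK_READ, "example.c: read buf[4]");
     __mf_unregister (buf, 16, __MF_TYPE_HEAP);
     free (buf);  */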
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans one or more valid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }

                  /* XXX: Access runs off left or right side of this
                     object.  That could be okay, if there are
                     other objects that fill in all the holes. */
                }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
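
/* Illustrative sketch (not part of the original source): conceptually,
   the compiler's tree rewriting turns a plain dereference into a guarded
   one, and a lookup-cache hit skips the __mf_check call entirely.
   Sketch only, assuming __MF_CACHE_INDEX behaves as in mf-impl.h:

     int v = *p;

   becomes roughly

     struct __mf_cache *e = & __mf_lookup_cache [__MF_CACHE_INDEX (p)];
     if (UNLIKELY (e->low > (uintptr_t) p
                   || e->high < (uintptr_t) p + sizeof (int) - 1))
       __mf_check (p, sizeof (int), __MF_CHECK_READ, "file.c:123 (*p)");
     int v = *p;  */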
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      unsigned idx_low = __MF_CACHE_INDEX (low);
      unsigned idx_high = __MF_CACHE_INDEX (high);
      unsigned i;
      for (i = idx_low; i <= idx_high; i++)
        {
          struct __mf_cache *entry = & __mf_lookup_cache [i];
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0 &&
            old_obj->type >= 0 &&
            old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0x3ff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    {
      return 0;
    }
}
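
/* Illustrative note (not part of the original source): the recollection
   counter doubles-plus-one per pass, so successive passes examine the
   1, 2, 4, 8, ... most recent plots per row until something overlaps:

     recollection = 0, 1, 3, 7, 15, ...
     plots examined per row = i = 0 .. recollection  */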
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }
  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
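
/* Example call (not part of the original source), following the pattern
   used by __mf_violation below: skip two wrapper frames, receive a
   heap-allocated symbol array, print it, and release it via CALL_REAL:

     char **symbols;
     unsigned i, num = __mf_backtrace (& symbols, (void *) pc, 2);
     for (i = 0; i < num; i++)
       fprintf (stderr, " %s\n", symbols[i]);
     CALL_REAL (free, symbols);  */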
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }
  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
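
/* For reference: the violation mode above is fixed per-process,
   chosen at startup from the MUDFLAP_OPTIONS environment variable.
   A usage sketch (option spellings assumed from this library's
   option table):

       MUDFLAP_OPTIONS="-viol-gdb -backtrace=8" ./instrumented-program  */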
/* ------------------------------------------------------------------------ */


unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT (); /* Mirror __mf_watch: guard against
                                 reentry while the lock is held.  */
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
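
/* A minimal usage sketch for the watch API, with hypothetical client
   code in a mudflap-instrumented program:

       int *p = (int *) malloc (4 * sizeof (int));
       __mf_watch (p, 4 * sizeof (int));
       p[0] = 42;                            <- reported as a "watch"
                                                (__MF_VIOL_WATCH) violation
       __mf_unwatch (p, 4 * sizeof (int));   <- accesses are quiet again  */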
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
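
/* Design note on the uncache step above: __mf_check consults the
   lookup cache before searching the object database, so a watched
   object left in the cache could be accessed without its watching_p
   flag ever being examined.  Evicting it forces the next access down
   the slow path, where the watch violation can be raised.  */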
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}
/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_state == reentrant);
      __mfu_report ();
      handler_installed = 0; /* Force re-installation next time: SysV
                                signal() semantics reset the handler
                                to SIG_DFL upon delivery.  */
    }
}
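
/* Usage sketch, assuming the sigusr1_report option has been enabled
   through MUDFLAP_OPTIONS: another process can request a live report
   from an instrumented program with

       kill (pid, SIGUSR1);      (or, from a shell: kill -USR1 <pid>)

   and the next pass through __mf_sigusr1_respond emits it via
   __mfu_report.  */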
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
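
/* write_itoa is deliberately built on write(2) alone: unlike fprintf
   it allocates no memory, so it cannot recurse into libmudflap's own
   malloc wrappers.  Worked example: write_itoa (2, 1234) fills buf[]
   backwards with the digits '4','3','2','1', NUL-terminates it, and
   writes the four bytes "1234" to stderr.  The bufsize of
   sizeof(n)*4 (16 bytes for a 32-bit unsigned) comfortably exceeds
   the 10 digits of UINT_MAX.  */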
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif
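
/* For illustration only: a failed "assert (n == obj_count)" above
   would produce output of roughly this shape from the replacement
   __assert_fail (single-threaded build; NNN stands for the actual
   failing line number):

       mf: assertion failure: `n == obj_count' in __mf_watch_or_not at mf-runtime.c:NNN

   i.e. the same facts glibc's version reports, but emitted with raw
   write(2) calls only.  */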
/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}
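
/* Design note: both helpers go through CALL_REAL so that the splay
   tree's own bookkeeping memory comes from the real malloc/free,
   bypassing the mudflap wrappers.  The tree therefore never registers
   or checks its own nodes, which would otherwise recurse.  */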
static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);


/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
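
/* For orientation, the three rewirings above are the classic splay
   rotations; left-hand variants sketched below (lower-case letters
   stand for arbitrary subtrees).  The right-hand variants are the
   mirror images.

   zig (no grandparent):     zig-zig:                 zig-zag:
       P          N              G          N             G            N
      / \   ->   / \            / \   ->   / \           / \   ->    /   \
     N   c      a   P          P   d      a   P         a   P       G     P
    / \            / \        / \            / \           / \     / \   / \
   a   b          b   c      N   c          b   G         N   d   a   b c   d
                            / \                / \       / \
                           a   b              c   d     b   c
*/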
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}
static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Since we're producing a balanced binary tree, it is not a problem
     that this function is recursive: its depth is bounded by the log
     of the node count.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
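
/* Worked example of the median split: for a sorted node array
   {n0 n1 n2 n3 n4} (low=0, high=4) the middle is n2, which becomes
   the root; {n0 n1} rebuild its left subtree and {n3 n4} its right,
   yielding depth 2 where a degenerate chain would have depth 4.  */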
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then re-linking them into a balanced
   binary tree: the in-order traversal visits nodes in ascending key
   order, which is exactly what the median-splitting helper above
   requires.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Establish a maximum recursion depth for the splay operation.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
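
/* Rough arithmetic behind the 2500 figure: each
   mfsplay_tree_splay_helper activation holds a handful of pointer
   and int values plus call overhead (on the order of 80 bytes per
   frame on x86, an estimate), and 2500 * 80B = 200KB, matching the
   comment above.  */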
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}
/* Insert a new node (associating KEY with VALUE) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
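
/* Usage sketch (the key/value here are hypothetical; within
   libmudflap the key is an object's low address and the value its
   __mf_object_t pointer):

       mfsplay_tree t = mfsplay_tree_new ();
       mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, some_object);
       mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
       assert (n != NULL && n->value == some_object);

   A second insert with an equal key simply overwrites the value.  */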
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
/* Look up KEY in SP, returning its node if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
/* Return the node holding the immediate predecessor of KEY, or NULL
   if there is no predecessor.  KEY need not be present in the
   tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}
/* Return the node holding the immediate successor of KEY, or NULL
   if there is no successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}
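
/* Example for the two neighbor queries: in a tree holding keys
   {10, 20, 30}, mfsplay_tree_predecessor (t, 25) yields the node
   with key 20 and mfsplay_tree_successor (t, 25) the node with key
   30; the probe key 25 need not itself be present.  */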
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
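
/* A foreach usage sketch: a hypothetical counting callback in the
   same style as mfsplay_tree_rebalance_helper1 above.

       static int
       count_one (mfsplay_tree_node n, void *data)
       {
         (*(unsigned *) data) ++;
         return 0;              (zero: keep iterating)
       }

       unsigned count = 0;
       mfsplay_tree_foreach (t, count_one, (void *) &count);

   Afterwards count equals t->num_keys.  */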