/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* The key.  */
  mfsplay_tree_key key;
  /* The value.  */
  mfsplay_tree_value value;
  /* The children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
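
/* Usage sketch (illustrative, not part of the original file): the
   runtime keys these trees by object base address, roughly:

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low,
                          (mfsplay_tree_value) obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) obj->low);

   mfsplay_tree_predecessor and mfsplay_tree_successor then walk the
   neighbours of a key during overlap searches; see __mf_find_objects2
   below.  */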
/* ------------------------------------------------------------------------ */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))

/* Codes to describe the context in which a violation occurs.  */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls.  */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)
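
/* Illustrative note (added commentary): __mf_get_state () and
   __mf_set_state () toggle the per-thread state between `active' and
   `reentrant'.  The guards are always used in pairs around the
   unlocked __mfu_* workers, e.g.:

     BEGIN_RECURSION_PROTECT ();  // active -> reentrant; aborts if the
                                  // state was already reentrant
     __mfu_check (ptr, sz, type, location);
     END_RECURSION_PROTECT ();    // reentrant -> active

   so any accidental re-entry into a public entry point from inside the
   runtime is caught by begin_recursion_protect1 above.  */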
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
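
/* Illustrative sketch (added commentary): the lookup cache is a
   direct-mapped hit cache indexed by address bits.  Assuming the
   __MF_CACHE_INDEX macro from mf-impl.h shifts and masks roughly like
   this:

     unsigned idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;
     struct __mf_cache *e = & __mf_lookup_cache [idx];
     int hit = (e->low <= (uintptr_t) ptr
                && CLAMPSZ (ptr, sz) <= e->high);

   a hit means the access falls inside a recently validated object, so
   instrumented code can skip calling __mf_check entirely.  */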
struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#ifdef LIBMUDFLAPTH
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only  */
static unsigned __mf_object_dead_head [__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary [__MF_TYPE_MAX_CEM+1] [__MF_PERSIST_MAX];
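
/* Illustrative sketch (added commentary): each cemetary row is a
   fixed-size ring of `persistent_count' plots.  Burying an object at
   the head evicts that plot's previous resident, roughly:

     unsigned row = obj->type;
     unsigned plot = __mf_object_dead_head [row];
     __mf_object_t *evicted = __mf_object_cemetary [row][plot];
     __mf_object_cemetary [row][plot] = obj;
     if (++plot == __mf_opts.persistent_count) plot = 0;
     __mf_object_dead_head [row] = plot;

   __mfu_unregister below performs essentially this sequence, and
   __mf_find_dead_objects scans the rings backward in time.  */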
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
static struct mudoption
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},

    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.  */
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
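
/* Example (illustrative): given the table above, a setting such as

     MUDFLAP_OPTIONS='-mode-check -viol-gdb -backtrace=8 -no-abbreviate'

   selects mode_check and viol_gdb, stores 8 into __mf_opts.backtrace
   via the `=N' form, and clears the abbreviate flag via the `no-'
   prefix; see __mfu_set_options below.  */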
void
__mf_usage ()
{
  struct mudoption *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
            );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf [128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default:
          abort ();
        }
      fprintf (stderr, "\n");
    }
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct mudoption *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr)
        {
        case ' ':
        case '\t':
        case '\n':
          optstr++;
          break;

        case '-':
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        break;
                      }
                    break;
                  }
              }
          }
          break;

        default:
          fprintf (stderr,
                   "warning: unrecognized string '%s' in mudflap options\n",
                   optstr);
          optstr += strlen (optstr);
          break;
        }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();
  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}
/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}
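
/* Illustrative note (added commentary): __mf_object_tree
   (__MF_TYPE_HEAP) lazily creates the heap tree on first use.  One
   tree exists per object type (NOACCESS, HEAP, HEAP_I, STACK, STATIC,
   GUESS), each ordered by object base address, so exact-base lookups
   and neighbour scans are amortized O(log n).  */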
void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done.  */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1.  */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch.  */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
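
  /* For instance (illustrative), REG_RESERVED (__mf_lc_mask) expands to

       __mf_register (& __mf_lc_mask, sizeof(__mf_lc_mask),
                      __MF_TYPE_NOACCESS, "__mf_lc_mask");

     marking the runtime's own control variables no-access, so that
     instrumented application code touching them raises a violation.  */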
  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL.  */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c.  */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad.  */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgement is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }

      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
*
1044 __mf_insert_new_object (uintptr_t low
, uintptr_t high
, int type
,
1045 const char *name
, uintptr_t pc
)
1047 DECLARE (void *, calloc
, size_t c
, size_t n
);
1049 __mf_object_t
*new_obj
;
1050 new_obj
= CALL_REAL (calloc
, 1, sizeof(__mf_object_t
));
1052 new_obj
->high
= high
;
1053 new_obj
->type
= type
;
1054 new_obj
->name
= name
;
1055 new_obj
->alloc_pc
= pc
;
1056 #if HAVE_GETTIMEOFDAY
1057 if (__mf_opts
.timestamps
)
1058 gettimeofday (& new_obj
->alloc_time
, NULL
);
1061 new_obj
->alloc_thread
= pthread_self ();
1064 if (__mf_opts
.backtrace
> 0 && (type
== __MF_TYPE_HEAP
|| type
== __MF_TYPE_HEAP_I
))
1065 new_obj
->alloc_backtrace_size
=
1066 __mf_backtrace (& new_obj
->alloc_backtrace
,
1069 __mf_link_object (new_obj
);
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry;
      unsigned i;
      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
        {
          /* For large objects (>= cache size - 1) check the whole cache.  */
          entry = & __mf_lookup_cache [0];
          for (i = 0; i <= __mf_lc_mask; i++, entry++)
            {
              /* NB: the "||" in the following test permits this code to
                 tolerate the situation introduced by __mf_check over
                 contiguous objects, where a cache entry spans several
                 objects.  */
              if (entry->low == low || entry->high == high)
                {
                  entry->low = MAXPTR;
                  entry->high = MINPTR;
                }
            }
        }
      else
        {
          /* Object is now smaller than cache size.  */
          unsigned entry_low_idx = __MF_CACHE_INDEX (low);
          unsigned entry_high_idx = __MF_CACHE_INDEX (high);
          if (entry_low_idx <= entry_high_idx)
            {
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
          else
            {
              /* Object wrapped around the end of the cache.  First search
                 from low to end of cache and then from 0 to high.  */
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
              entry = & __mf_lookup_cache [0];
              for (i = 0; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
        }
    }
}
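
/* Worked example (illustrative): with __mf_lc_mask == 1023, an object
   whose low address hashes to slot 1020 and whose high address hashes
   to slot 3 wraps the ring; the two loops in the final branch above
   scan slots 1020..1023 and then 0..3.  */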
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected.  */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here.  */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete.  */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                     CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          memset ((void *) old_obj->low,
                  0,
                  (size_t) (old_obj->high - old_obj->low + 1));

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries.  */
#if 0
                fprintf (stderr,
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
#endif
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  CALL_REAL(free, del_obj->dealloc_backtrace);
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_adapt_cache */

struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value = 0.0;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping.  */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
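
/* Worked example (illustrative): if live heap objects sit roughly 32
   bytes apart, address bit 5 is the lowest bit that splits their
   weighted traffic evenly, so weighted_address_bits[5][0] *
   weighted_address_bits[5][1] is near the maximum product and the
   shoulder loop above settles on a shift near 5; the cache then
   indexes on (ptr >> 5) & __mf_lc_mask, giving neighbouring objects
   distinct slots.  */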
/* ------------------------------------------------------------------------ */
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects.  */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
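
/* Usage sketch (illustrative): to ask whether anything overlaps a
   single byte at address p in the HEAP tree:

     __mf_object_t *hit[1];
     unsigned n = __mf_find_objects2 ((uintptr_t) p, (uintptr_t) p,
                                      hit, 1, __MF_TYPE_HEAP);

   The return value may exceed max_objs; callers such as __mfu_check
   first probe with max_objs == 1 and re-query into a malloc'd array
   when several overlaps are reported.  */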
static unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    return 0;
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "          lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

unsigned
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
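
/* For example (illustrative), FETCH (2) expands to

     do { if (pc_array_size >= 2) {
            pc_array[2] = __builtin_return_address(2);
            if (pc_array[2] == 0) pc_array_size = 2; } } while (0)

   truncating the collected trace at the first NULL return address.  */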
  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif
#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries.  */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking.  */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers.  */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:
      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_watch */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}


static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
static void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
static void
__mf_sigusr1_respond ()
{
  static int handler_installed;

  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if(! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library.  */
    }
}
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf [bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}


void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}

#endif
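
/* Illustrative note (added commentary): write_itoa (2, 1234) emits
   "1234" on stderr using only write(2), so this assertion path cannot
   recurse into stdio or malloc - exactly the property this custom
   __assert_fail exists to guarantee.  */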
2360 /* Adapted splay tree code, originally from libiberty. It has been
2361 specialized for libmudflap as requested by RMS. */
2364 mfsplay_tree_free (void *p
)
2366 DECLARE (void, free
, void *p
);
2367 CALL_REAL (free
, p
);
static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}

static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);

/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
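
/* Annotation (not in the original sources): the two symmetric blocks
   above implement the classic "zig-zig" rotation; for the left/left
   case the pointers are rewired so that

            *grandparent                n
               /                         \
           *parent            ==>         p  (the old *parent)
             /                             \
            n                               *grandparent

   while the final two blocks handle the "zig-zag" case, hoisting N
   over a parent and grandparent that lean in opposite directions.  */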

static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  /* Append this node's address to the array cursor passed as DATA.  */
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}

static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
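
/* Worked example (annotation): rebalancing a 7-node array with
   mfsplay_tree_rebalance_helper2 (array, 0, 6) picks middle = 3 as the
   root, then recurses on [0..2] and [4..6]:

                  array[3]
                 /        \
          array[1]        array[5]
           /    \          /    \
      array[0] array[2] array[4] array[6]

   i.e. a perfectly balanced tree of depth 3 for the in-order keys.  */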

/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}

/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
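
/* Annotation (not in the original sources): the "roughly 200K" estimate
   above is consistent with a splay-helper stack frame costing on the
   order of 80 bytes: 2500 frames * ~80 bytes/frame ~= 200 KB.  The real
   per-frame cost depends on the ABI and compiler options.  */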

/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}

/* Insert a new node (associating KEY with VALUE) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
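
/* Usage sketch (annotation; "obj1"/"obj2" are hypothetical
   mfsplay_tree_value pointers): inserting an existing key replaces the
   value in place instead of creating a duplicate node:

       mfsplay_tree t = mfsplay_tree_new ();
       mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, obj1);
       mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, obj2);

   leaves t->num_keys == 1, and a subsequent lookup of key 0x1000 yields
   a node whose value is obj2.  */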

/* Remove KEY from SP.  It is not an error if it did not exist.  */
static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}

/* Lookup KEY in SP, returning its node if present, and NULL
   otherwise.  */
static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}

/* Return the node with the immediate predecessor KEY, or NULL if there
   is no predecessor.  KEY need not be present in the tree.  */
static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}

/* Return the node with the immediate successor KEY, or NULL if there
   is no successor.  KEY need not be present in the tree.  */
static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}

/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);

  return val;
}
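
/* Usage sketch (annotation): a foreach callback returns 0 to continue
   and nonzero to stop the traversal early (the nonzero value is
   propagated back to the caller); mfsplay_tree_rebalance_helper1 above
   is the in-tree example.  A hypothetical node counter:

       static int
       count_cb (mfsplay_tree_node n, void *data)
       {
         (*(unsigned *) data) ++;
         return 0;
       }

       unsigned count = 0;
       mfsplay_tree_foreach (t, count_cb, & count);
*/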