/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008
   Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define __EXTENSIONS__
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <assert.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;
static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
/* ------------------------------------------------------------------------ */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))

/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5
/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)
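/* Usage sketch (illustrative): every locked entry point brackets its
   body as

     LOCKTH ();
     BEGIN_RECURSION_PROTECT ();
     ... work that may call wrapped libc functions ...
     END_RECURSION_PROTECT ();
     UNLOCKTH ();

   so a wrapped call that re-enters the runtime observes the reentrant
   state and is forwarded to the real libc implementation instead of
   recursing.  */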
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)

struct __mf_options __mf_opts;
int __mf_starting_p = 1;
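/* Indexing sketch (illustrative; the actual __MF_CACHE_INDEX macro
   lives in mf-runtime.h): a pointer maps to a cache slot roughly as

     slot = ((uintptr_t) ptr >> __mf_lc_shift) & __mf_lc_mask;

   With the defaults (shift 2, mask 1023), ptr 0x1000 lands in slot
   (0x1000 >> 2) & 1023 = 0, and ptr 0x1004 in slot 1.  */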
#ifdef LIBMUDFLAPTH
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head [__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary [__MF_TYPE_MAX_CEM+1] [__MF_PERSIST_MAX];
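/* Layout sketch (illustrative): each cemetary row is a circular
   buffer holding up to __mf_opts.persistent_count recently
   unregistered objects of one type; __mf_object_dead_head[row] is the
   next plot to be overwritten, so walking backward from it visits the
   most recently buried objects first.  */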
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
static struct mudoption
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},

    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
static void
__mf_usage ()
{
  struct mudoption *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2008 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
            );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf [128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 3);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default:
          abort ();
        }
    }

  fprintf (stderr, "\n");
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct mudoption *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*optstr+1)
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          *(opts->target) = 0;
                        break;
                      }
                    break;
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
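/* Parsing sketch (illustrative): given
     MUDFLAP_OPTIONS='-mode-check -no-print-leaks -backtrace=8'
   the loop above matches "mode-check" (set_option: mudflap_mode
   becomes mode_check), matches "print-leaks" with negate set
   (print_leaks becomes 0), and matches "backtrace"
   (read_integer_option: strtol consumes the "8").  */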
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}
/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}
void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
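/* For instance, REG_RESERVED (__mf_lc_mask) expands to
   __mf_register (& __mf_lc_mask, sizeof(__mf_lc_mask),
   __MF_TYPE_NOACCESS, "__mf_lc_mask"), so an instrumented program
   that strays into the runtime's own globals triggers a violation.  */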
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin,  sizeof (*stdin),  __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t *ovr_obj[1];
            unsigned obj_count;
            __mf_object_t **all_ovr_obj = NULL;
            __mf_object_t **dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
*
1045 __mf_insert_new_object (uintptr_t low
, uintptr_t high
, int type
,
1046 const char *name
, uintptr_t pc
)
1048 DECLARE (void *, calloc
, size_t c
, size_t n
);
1050 __mf_object_t
*new_obj
;
1051 new_obj
= CALL_REAL (calloc
, 1, sizeof(__mf_object_t
));
1053 new_obj
->high
= high
;
1054 new_obj
->type
= type
;
1055 new_obj
->name
= name
;
1056 new_obj
->alloc_pc
= pc
;
1057 #if HAVE_GETTIMEOFDAY
1058 if (__mf_opts
.timestamps
)
1059 gettimeofday (& new_obj
->alloc_time
, NULL
);
1062 new_obj
->alloc_thread
= pthread_self ();
1065 if (__mf_opts
.backtrace
> 0 && (type
== __MF_TYPE_HEAP
|| type
== __MF_TYPE_HEAP_I
))
1066 new_obj
->alloc_backtrace_size
=
1067 __mf_backtrace (& new_obj
->alloc_backtrace
,
1070 __mf_link_object (new_obj
);
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry;
      unsigned i;

      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
        {
          /* For large objects (>= cache size - 1) check the whole cache.  */
          entry = & __mf_lookup_cache [0];
          for (i = 0; i <= __mf_lc_mask; i++, entry++)
            {
              /* NB: the "||" in the following test permits this code to
                 tolerate the situation introduced by __mf_check over
                 contiguous objects, where a cache entry spans several
                 objects.  */
              if (entry->low == low || entry->high == high)
                {
                  entry->low = MAXPTR;
                  entry->high = MINPTR;
                }
            }
        }
      else
        {
          /* Object is now smaller than the cache size.  */
          unsigned entry_low_idx = __MF_CACHE_INDEX (low);
          unsigned entry_high_idx = __MF_CACHE_INDEX (high);
          if (entry_low_idx <= entry_high_idx)
            {
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
          else
            {
              /* Object wrapped around the end of the cache. First search
                 from low to end of cache and then from 0 to high.  */
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
              entry = & __mf_lookup_cache [0];
              for (i = 0; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries.  */
                fprintf (stderr,
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_adapt_cache */

struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0
         && s.live_obj_count > 0
         && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7; /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

static unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}


static unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
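/* Search sketch (illustrative): after the exact-match probe at
   ptr_low, the two passes walk successors (direction 0) and then
   predecessors (direction 1) outward from ptr_low, stopping at the
   first neighbor that no longer overlaps [ptr_low, ptr_high]; the
   early stop is valid because each per-type tree is keyed on the
   objects' .low addresses.  */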
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    {
      return 0;
    }
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

unsigned
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif
#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, "      %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:
      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
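/* Widening sketch (illustrative): with sz=4 the scanned window grows
   by sz*tries*tries bytes on each side per attempt -- 4, 16, 36, ...
   -- around the faulting region, until some object turns up or 16
   attempts elapse; the search runs once over live objects and once
   over the cemetary.  */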
/* ------------------------------------------------------------------------ */
/* __mf_watch */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
static void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
static void
__mf_sigusr1_respond ()
{
  static int handler_installed;

  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if(! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */

#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
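/* Example (illustrative): n = 305 fills buf right-to-left with '5',
   '0', '3' and then writes the string "305".  Using raw write()
   instead of stdio keeps this path safe inside assertion failures,
   where stdio could recurse into the hooked allocator.  */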
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
  abort ();
}

#endif
2361 /* Adapted splay tree code, originally from libiberty. It has been
2362 specialized for libmudflap as requested by RMS. */
2365 mfsplay_tree_free (void *p
)
2367 DECLARE (void, free
, void *p
);
2368 CALL_REAL (free
, p
);
static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}
static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);
/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
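
/* Illustrative sketch (compiled out): the rotations above in
   miniature.  Keys here are arbitrary small integers, not real object
   addresses.  Inserting ascending keys leaves the newest key at the
   root with the rest chained down the left; splaying around key 1
   afterwards then exercises the zig-zig case, since both the node and
   its parent are left children.  */
#if 0
void example_rotations (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  mfsplay_tree_insert (t, 1, (void *) "a");
  mfsplay_tree_insert (t, 2, (void *) "b");
  mfsplay_tree_insert (t, 3, (void *) "c");  /* root: 3, chain 3->2->1 */
  mfsplay_tree_splay (t, 1);                 /* key 1 rotated up to the root */
}
#endif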
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}
static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
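
/* Worked example (illustrative): given an in-order array of 5 node
   pointers n0..n4, helper2 picks n2 (the middle, rounding down) as
   the root, then recurses on [n0,n1] and [n3,n4]:

            n2
           /  \
         n0    n3
           \     \
           n1    n4

   yielding depth 3 instead of a length-5 chain.  */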
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key. */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
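
/* Back-of-the-envelope check for the 2500 cap above (assumption:
   each mfsplay_tree_splay_helper frame costs on the order of 80 bytes
   on x86): 2500 frames * ~80 bytes/frame ~= 200 KB, matching the
   figure quoted in the comment.  */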
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}
/* Insert a new node (associating KEY with DATA) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
/* Lookup KEY in SP, returning VALUE if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
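
/* Illustrative sketch (compiled out): the basic lifecycle of the
   insert/lookup/remove API above.  The key and values below are
   hypothetical.  */
#if 0
void example_lifecycle (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  mfsplay_tree_node n;

  mfsplay_tree_insert (t, 0x1000, (void *) "obj");
  n = mfsplay_tree_lookup (t, 0x1000);               /* n != NULL; n->value is "obj" */
  mfsplay_tree_insert (t, 0x1000, (void *) "obj2");  /* same key: value replaced */
  mfsplay_tree_remove (t, 0x1000);
  n = mfsplay_tree_lookup (t, 0x1000);               /* now NULL */
}
#endif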
/* Return the immediate predecessor KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}
/* Return the immediate successor KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}
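
/* Illustrative sketch (compiled out): predecessor/successor queries
   with a key that is absent from the tree, e.g. to find registered
   objects neighbouring an address.  The keys below are hypothetical.  */
#if 0
void example_neighbours (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  mfsplay_tree_node p, s;

  mfsplay_tree_insert (t, 10, (void *) "low");
  mfsplay_tree_insert (t, 30, (void *) "high");

  /* Key 20 is not present: its neighbours are keys 10 and 30.  */
  p = mfsplay_tree_predecessor (t, 20);  /* p->key == 10 */
  s = mfsplay_tree_successor (t, 20);    /* s->key == 30 */
}
#endif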
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
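
/* Illustrative sketch (compiled out): a foreach callback in the style
   of mfsplay_tree_rebalance_helper1 above.  This hypothetical one
   merely counts nodes; returning non-zero from the callback would
   stop the walk early.  */
#if 0
static int
example_count_cb (mfsplay_tree_node n, void *data)
{
  (*(unsigned *) data) ++;
  return 0;                     /* keep iterating */
}

void example_count (mfsplay_tree t)
{
  unsigned count = 0;
  mfsplay_tree_foreach (t, example_count_cb, &count);
  /* count now equals t->num_keys.  */
}
#endif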