/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#include <assert.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation.  */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* The key and value.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* The children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* The number of nodes in the tree.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;
static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
/* ------------------------------------------------------------------------ */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))

/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5
/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)
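
/* Illustrative sketch (an editorial addition for exposition, not original
   runtime code): the idiom used by every public entry point in this file.
   An unlocked __mfu_* worker does the real work; the public wrapper takes
   the big lock and arms the recursion guard, so memory accesses performed
   by the runtime itself are not re-checked.  Compare __mf_check/__mfu_check
   below.  The worker name here is hypothetical.  */
#if 0 /* exposition only */
void
__mf_example_entry_point (void *ptr)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_example_entry_point (ptr);  /* hypothetical unlocked worker */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
#endif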
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
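
/* Illustrative sketch (an editorial addition for exposition, not original
   runtime code): how an address maps to a lookup-cache slot and what a
   cache hit means.  The shift-then-mask indexing is assumed to mirror the
   __MF_CACHE_INDEX macro from mf-runtime.h; instrumented code performs
   this same containment test inline and only calls __mf_check on a miss.  */
static inline int __attribute__ ((unused))
__mf_example_cache_hit_p (void *ptr, size_t sz)
{
  uintptr_t low = (uintptr_t) ptr;
  uintptr_t high = low + sz - 1;
  struct __mf_cache *e =
    & __mf_lookup_cache [(low >> __mf_lc_shift) & __mf_lc_mask];
  /* Hit: the access lies wholly within a cached valid interval, so the
     splay-tree search can be skipped.  */
  return (e->low <= low && high <= e->high);
}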
struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#if defined(HAVE_TLS) && !defined(USE_EMUTLS)
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below. */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address). */
/* Actually stored as static vars within lookup function below. */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head [__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary [__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
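
/* Illustrative sketch (an editorial addition for exposition, not original
   runtime code): how a dead object is filed into the cemetery.  Each type
   has its own row, treated as a circular buffer indexed by
   __mf_object_dead_head; burying may evict (and return) the previous
   resident of the plot, which the caller then frees.  The authoritative
   logic lives in __mfu_unregister below.  */
static inline __mf_object_t * __attribute__ ((unused))
__mf_example_bury (__mf_object_t *obj)
{
  unsigned row = obj->type;  /* caller ensures type <= __MF_TYPE_MAX_CEM */
  unsigned plot = __mf_object_dead_head [row];
  __mf_object_t *evicted = __mf_object_cemetary [row][plot];
  __mf_object_cemetary [row][plot] = obj;
  plot ++;
  if (plot == __mf_opts.persistent_count) plot = 0;  /* wrap around */
  __mf_object_dead_head [row] = plot;
  return evicted;  /* may be NULL; caller disposes of it */
}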
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif

  /* PR41443: Beware that the above flags will be applied to
     setuid/setgid binaries, and cannot be overridden with
     $MUDFLAP_OPTIONS.  So the defaults must be non-exploitable.

     Should we consider making the default violation_mode something
     harsher than viol_nop?  OTOH, glibc's MALLOC_CHECK_ is disabled
     by default for these same programs. */
}
static struct mudoption
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},

    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.  */
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
void
__mf_usage ()
{
  struct mudoption *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2011 Free Software Foundation, Inc.\n"
           "\n"
           "Unless setuid, a program's mudflap options can be set by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware ");
#else
           "thread-unaware ");
#endif
  /* XXX: The multi-threaded thread-unaware combination is bad. */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);
      char buf [128];

      switch (opt->type)
        {
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default:
          abort ();
        }
    }

  fprintf (stderr, "\n");
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct mudoption *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */
  while (*optstr)
    {
      switch (*optstr)
        {
        case ' ':
        case '\t':
        case '\n':
          optstr++;
          break;

        case '-':
          if (*optstr+1)
            {
              int negate = 0;
              optstr++;

              if (*optstr == '?' ||
                  strncmp (optstr, "help", 4) == 0)
                {
                  /* Caller will print help and exit. */
                  return -1;
                }

              if (strncmp (optstr, "no-", 3) == 0)
                {
                  negate = 1;
                  optstr = & optstr[3];
                }

              for (opts = options; opts->name; opts++)
                {
                  if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                    {
                      optstr += strlen (opts->name);
                      assert (opts->target);
                      switch (opts->type)
                        {
                        case set_option:
                          if (negate)
                            *(opts->target) = 0;
                          else
                            *(opts->target) = opts->value;
                          break;
                        case read_integer_option:
                          if (! negate && (*optstr == '=' && *(optstr+1)))
                            {
                              optstr++;
                              tmp = strtol (optstr, &nxt, 10);
                              if ((optstr != nxt) && (tmp != LONG_MAX))
                                {
                                  optstr = nxt;
                                  *(opts->target) = (int)tmp;
                                }
                            }
                          else if (negate)
                            *(opts->target) = 0;
                          break;
                        }
                      break;
                    }
                }
            }
          break;

        default:
          fprintf (stderr,
                   "warning: unrecognized string '%s' in mudflap options\n",
                   optstr);
          optstr += strlen (optstr);
          break;
        }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed. */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
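
/* Illustrative sketch (an editorial addition for exposition, not original
   runtime code): driving the parser above directly.  Option names come
   from the options[] table; at startup the same string would normally
   arrive via the MUDFLAP_OPTIONS environment variable (see __mf_init
   below).  */
static void __attribute__ ((unused))
__mf_example_options (void)
{
  /* Equivalent to: export MUDFLAP_OPTIONS='-mode-check -viol-segv -backtrace=8 -no-abbreviate' */
  (void) __mfu_set_options ("-mode-check -viol-segv -backtrace=8 -no-abbreviate");
}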
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();
  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}
static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}

/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees. */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees [type];
}
void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

#if defined(__FreeBSD__) && defined(LIBMUDFLAPTH)
  pthread_self ();
  LOCKTH ();
  UNLOCKTH ();
#endif /* Prime mutex which calls calloc upon first lock to avoid deadlock. */

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  if (getuid () == geteuid () && getgid () == getegid ()) /* PR41433, not setuid */
    ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini. */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.  */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics ++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache. */

  /* Can it possibly exist in the cache? */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry;
      unsigned i;
      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
        {
          /* For large objects (>= cache size - 1) check the whole cache. */
          entry = & __mf_lookup_cache [0];
          for (i = 0; i <= __mf_lc_mask; i++, entry++)
            {
              /* NB: the "||" in the following test permits this code to
                 tolerate the situation introduced by __mf_check over
                 contiguous objects, where a cache entry spans several
                 objects.  */
              if (entry->low == low || entry->high == high)
                {
                  entry->low = MAXPTR;
                  entry->high = MINPTR;
                }
            }
        }
      else
        {
          /* Object is now smaller than cache size. */
          unsigned entry_low_idx = __MF_CACHE_INDEX (low);
          unsigned entry_high_idx = __MF_CACHE_INDEX (high);
          if (entry_low_idx <= entry_high_idx)
            {
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
          else
            {
              /* Object wrapped around the end of the cache. First search
                 from low to end of cache and then from 0 to high. */
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
              entry = & __mf_lookup_cache [0];
              for (i = 0; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      /* XXX: why the entire cache? */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1. */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps. */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs. */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units. */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation. */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK. */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable. */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
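
/* Illustrative sketch (an editorial addition for exposition, not original
   runtime code): how a hook such as the malloc wrapper in mf-hooks1.c
   would announce a fresh heap region to the runtime.  The label is used
   only in diagnostics; real hooks also add crumple zones around the
   payload.  */
static void * __attribute__ ((unused))
__mf_example_register_heap (size_t sz)
{
  DECLARE (void *, malloc, size_t c);
  void *p = CALL_REAL (malloc, sz);
  if (p != NULL)
    __mf_register (p, sz, __MF_TYPE_HEAP, "example heap region");
  return p;
}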
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired. */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary. */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch. */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj. */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries. */
                fprintf (stderr,
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj. */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects. */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value. */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero. */
  if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects. */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too. */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets. */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache. */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed. */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

static unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit. */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin. */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}


static unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps. */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: C may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
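
/* Illustrative sketch (an editorial addition for exposition, not original
   runtime code): the count-then-fill protocol callers of __mf_find_objects
   use (compare __mfu_check above and __mf_watch_or_not below): probe with a
   small fixed buffer first, and only heap-allocate a bigger array when the
   returned overlap count exceeds it. */
static unsigned __attribute__ ((unused))
__mf_example_collect_overlaps (uintptr_t low, uintptr_t high)
{
  DECLARE (void *, malloc, size_t c);
  DECLARE (void, free, void *p);
  __mf_object_t *first[1];
  unsigned count = __mf_find_objects (low, high, first, 1);
  if (count > 1)
    {
      __mf_object_t **all = CALL_REAL (malloc, count * sizeof (__mf_object_t *));
      if (all != NULL)
        {
          unsigned n = __mf_find_objects (low, high, all, count);
          /* ... examine all[0..n-1] here ... */
          (void) n;
          CALL_REAL (free, all);
        }
    }
  return count;
}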
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time. */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer. */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object! */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time. */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    {
      return 0;
    }
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats. */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks. */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch. */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

unsigned
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter. */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0. :-( */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif
#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards. */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message. */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database. */

          for (i=0; i<num; i++)
            fprintf (stderr, "      %s\n", symbols[i]);

          /* Calling free() here would trigger a violation. */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation? */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;

    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;

    case viol_abort:
      abort ();
      break;

    case viol_gdb:
      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.) */
      break;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_watch */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  UNLOCKTH ();
  return rc;
}


static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check(). */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
static void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1. */
static void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if(! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion. */

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        break;
    }
  {
    char *m = & buf [bufsize-2-i];
    buf [bufsize-1] = '\0';
    write (fd, m, strlen(m));
  }
}


void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}
/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}
static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);
/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
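/* For reference, the "both left children" rotation above in picture
   form (g is *grandparent, p is *parent; subtrees A..D keep their
   left-to-right order, so the in-order sequence is preserved):

           g                 n
          / \               / \
         p   D             A   p
        / \        =>         / \
       n   C                 B   g
      / \                       / \
     A   B                     C   D

   The mirror-image case and the zigzag cases below follow the same
   pattern: N always ends up where G was.  */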
/* Helper for mfsplay_tree_rebalance, called via mfsplay_tree_foreach:
   store each visited node in the next slot of the array that
   ARRAY_PTR points into, advancing the cursor.  */
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}
/* Given an in-order sorted array of node pointers, relink the nodes
   in ARRAY[LOW..HIGH] into a balanced subtree and return its root.  */
static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
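/* Example: for seven nodes whose in-order keys occupy array slots
   0..6, slot 3 becomes the root, slots 0..2 rebuild its left subtree
   (rooted at slot 1), and slots 4..6 its right subtree (rooted at
   slot 5).  The result has depth 3 instead of a worst-case chain of
   depth 7, which is exactly what the depth-limited splay needs.  */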
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == NULL)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, at roughly 80 bytes per mfsplay_tree_splay_helper frame,
     a depth limit of 2500 corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      /* The search hit the depth limit; flatten the tree and retry.  */
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      /* A freshly balanced tree must splay within the depth limit.  */
      if (sp->rebalance_p)
        abort ();
    }

  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new (void)
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->depth = 0;
  sp->num_keys = 0;
  return sp;
}
/* Insert a new node (associating KEY with DATA) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;

      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
/* Remove KEY from SP.  It is not an error if it did not exist.  */
static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
/* Lookup KEY in SP, returning VALUE if present, and NULL
   otherwise.  */
static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
/* Return the immediate predecessor KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */
static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;

  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;

  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;

  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;

  return node;
}
/* Return the immediate successor KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */
static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;

  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;

  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;

  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;

  return node;
}
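/* Example: in a tree holding keys {10, 20, 30},
   mfsplay_tree_predecessor (sp, 25) returns the node with key 20 and
   mfsplay_tree_successor (sp, 25) the node with key 30; the query key
   itself need not be present.  This is what lets the runtime find the
   registered objects nearest an out-of-bounds address.  */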
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp--;
        }
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
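/* A sketch of a foreach client: counting all nodes with a trivial
   callback (the names here are illustrative only; see
   mfsplay_tree_rebalance_helper1 above for a real client):  */
#if 0
static int
count_one (mfsplay_tree_node n, void *data)
{
  ++ *(unsigned *) data;
  return 0;                 /* returning nonzero would stop the walk */
}

/* ... somewhere with a tree T in scope ... */
  unsigned count = 0;
  mfsplay_tree_foreach (t, count_one, &count);
#endif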