1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
5 Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
6 adapted from libiberty.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later version.
15 In addition to the permissions in the GNU General Public License, the
16 Free Software Foundation gives you unlimited permission to link the
17 compiled version of this file into combinations with other programs,
18 and to distribute those combinations without any restriction coming
19 from the use of this file. (The General Public License restrictions
20 do apply in other respects; for example, they cover modification of
21 the file, and distribution when not linked into a combine
24 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
25 WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
29 You should have received a copy of the GNU General Public License
30 along with GCC; see the file COPYING. If not, write to the Free
31 Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 /* These attempt to coax various unix flavours to declare all our
37 needed tidbits in the system headers. */
38 #if !defined(__FreeBSD__) && !defined(__APPLE__)
40 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
44 #define __EXTENSIONS__
46 #define _LARGE_FILE_API
47 #define _XOPEN_SOURCE_EXTENDED 1
51 #include <sys/types.h>
55 #ifdef HAVE_EXECINFO_H
65 #include <sys/types.h>
70 #include "mf-runtime.h"
74 /* ------------------------------------------------------------------------ */
75 /* Splay-tree implementation. */
77 typedef uintptr_t mfsplay_tree_key
;
78 typedef void *mfsplay_tree_value
;
80 /* Forward declaration for a node in the tree. */
81 typedef struct mfsplay_tree_node_s
*mfsplay_tree_node
;
83 /* The type of a function used to iterate over the tree. */
84 typedef int (*mfsplay_tree_foreach_fn
) (mfsplay_tree_node
, void *);
86 /* The nodes in the splay tree. */
87 struct mfsplay_tree_node_s
91 mfsplay_tree_value value
;
93 mfsplay_tree_node left
;
94 mfsplay_tree_node right
;
95 /* XXX: The addition of a parent pointer may eliminate some recursion. */
98 /* The splay tree itself. */
101 /* The root of the tree. */
102 mfsplay_tree_node root
;
104 /* The last key value for which the tree has been splayed, but not
106 mfsplay_tree_key last_splayed_key
;
107 int last_splayed_key_p
;
112 /* Traversal recursion control flags. */
115 unsigned rebalance_p
;
117 typedef struct mfsplay_tree_s
*mfsplay_tree
;
119 static mfsplay_tree
mfsplay_tree_new (void);
120 static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree
, mfsplay_tree_key
, mfsplay_tree_value
);
121 static void mfsplay_tree_remove (mfsplay_tree
, mfsplay_tree_key
);
122 static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree
, mfsplay_tree_key
);
123 static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree
, mfsplay_tree_key
);
124 static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree
, mfsplay_tree_key
);
125 static int mfsplay_tree_foreach (mfsplay_tree
, mfsplay_tree_foreach_fn
, void *);
126 static void mfsplay_tree_rebalance (mfsplay_tree sp
);
128 /* ------------------------------------------------------------------------ */
131 #define CTOR __attribute__ ((constructor))
132 #define DTOR __attribute__ ((destructor))
135 /* Codes to describe the context in which a violation occurs. */
136 #define __MF_VIOL_UNKNOWN 0
137 #define __MF_VIOL_READ 1
138 #define __MF_VIOL_WRITE 2
139 #define __MF_VIOL_REGISTER 3
140 #define __MF_VIOL_UNREGISTER 4
141 #define __MF_VIOL_WATCH 5
143 /* Protect against recursive calls. */
144 #define BEGIN_RECURSION_PROTECT() do { \
145 if (UNLIKELY (__mf_state == reentrant)) { \
146 write (2, "mf: erroneous reentrancy detected in `", 38); \
147 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
148 write (2, "'\n", 2); \
150 __mf_state = reentrant; \
153 #define END_RECURSION_PROTECT() do { \
154 __mf_state = active; \
159 /* ------------------------------------------------------------------------ */
160 /* Required globals. */
162 #define LOOKUP_CACHE_MASK_DFL 1023
163 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
164 #define LOOKUP_CACHE_SHIFT_DFL 2
166 struct __mf_cache __mf_lookup_cache
[LOOKUP_CACHE_SIZE_MAX
];
167 uintptr_t __mf_lc_mask
= LOOKUP_CACHE_MASK_DFL
;
168 unsigned char __mf_lc_shift
= LOOKUP_CACHE_SHIFT_DFL
;
169 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
171 struct __mf_options __mf_opts
;
173 int __mf_starting_p
= 1;
175 enum __mf_state_enum __mf_state
= active
;
177 /* See __mf_state_perthread() in mf-hooks.c. */
182 pthread_mutex_t __mf_biglock
=
183 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
184 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
;
186 PTHREAD_MUTEX_INITIALIZER
;
190 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
191 the libmudflap.la (no threading support) can diagnose whether
192 the application is linked with -lpthread. See __mf_usage() below. */
194 #ifdef _POSIX_THREADS
195 #pragma weak pthread_join
197 #define pthread_join NULL
199 const void *threads_active_p
= (void *) pthread_join
;
203 /* ------------------------------------------------------------------------ */
204 /* stats-related globals. */
206 static unsigned long __mf_count_check
;
207 static unsigned long __mf_lookup_cache_reusecount
[LOOKUP_CACHE_SIZE_MAX
];
208 static unsigned long __mf_count_register
;
209 static unsigned long __mf_total_register_size
[__MF_TYPE_MAX
+1];
210 static unsigned long __mf_count_unregister
;
211 static unsigned long __mf_total_unregister_size
;
212 static unsigned long __mf_count_violation
[__MF_VIOL_WATCH
+1];
213 static unsigned long __mf_sigusr1_received
;
214 static unsigned long __mf_sigusr1_handled
;
215 /* not static */ unsigned long __mf_reentrancy
;
217 /* not static */ unsigned long __mf_lock_contention
;
221 /* ------------------------------------------------------------------------ */
222 /* mode-check-related globals. */
224 typedef struct __mf_object
226 uintptr_t low
, high
; /* __mf_register parameters */
228 char type
; /* __MF_TYPE_something */
229 char watching_p
; /* Trigger a VIOL_WATCH on access? */
230 unsigned read_count
; /* Number of times __mf_check/read was called on this object. */
231 unsigned write_count
; /* Likewise for __mf_check/write. */
232 unsigned liveness
; /* A measure of recent checking activity. */
233 unsigned description_epoch
; /* Last epoch __mf_describe_object printed this. */
236 struct timeval alloc_time
;
237 char **alloc_backtrace
;
238 size_t alloc_backtrace_size
;
240 pthread_t alloc_thread
;
244 uintptr_t dealloc_pc
;
245 struct timeval dealloc_time
;
246 char **dealloc_backtrace
;
247 size_t dealloc_backtrace_size
;
249 pthread_t dealloc_thread
;
253 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
254 /* Actually stored as static vars within lookup function below. */
256 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
257 static unsigned __mf_object_dead_head
[__MF_TYPE_MAX_CEM
+1]; /* next empty spot */
258 static __mf_object_t
*__mf_object_cemetary
[__MF_TYPE_MAX_CEM
+1][__MF_PERSIST_MAX
];
261 /* ------------------------------------------------------------------------ */
262 /* Forward function declarations */
264 void __mf_init () CTOR
;
265 static void __mf_sigusr1_respond ();
266 static unsigned __mf_find_objects (uintptr_t ptr_low
, uintptr_t ptr_high
,
267 __mf_object_t
**objs
, unsigned max_objs
);
268 static unsigned __mf_find_objects2 (uintptr_t ptr_low
, uintptr_t ptr_high
,
269 __mf_object_t
**objs
, unsigned max_objs
, int type
);
270 static unsigned __mf_find_dead_objects (uintptr_t ptr_low
, uintptr_t ptr_high
,
271 __mf_object_t
**objs
, unsigned max_objs
);
272 static void __mf_adapt_cache ();
273 static void __mf_describe_object (__mf_object_t
*obj
);
274 static unsigned __mf_watch_or_not (void *ptr
, size_t sz
, char flag
);
275 static mfsplay_tree
__mf_object_tree (int type
);
276 static void __mf_link_object (__mf_object_t
*node
);
277 static void __mf_unlink_object (__mf_object_t
*node
);
280 /* ------------------------------------------------------------------------ */
281 /* Configuration engine */
284 __mf_set_default_options ()
286 memset (& __mf_opts
, 0, sizeof (__mf_opts
));
288 __mf_opts
.adapt_cache
= 1000003;
289 __mf_opts
.abbreviate
= 1;
290 __mf_opts
.verbose_violations
= 1;
291 __mf_opts
.free_queue_length
= 4;
292 __mf_opts
.persistent_count
= 100;
293 __mf_opts
.crumple_zone
= 32;
294 __mf_opts
.backtrace
= 4;
295 __mf_opts
.timestamps
= 1;
296 __mf_opts
.mudflap_mode
= mode_check
;
297 __mf_opts
.violation_mode
= viol_nop
;
298 __mf_opts
.heur_std_data
= 1;
300 __mf_opts
.thread_stack
= 0;
319 "mudflaps do nothing",
320 set_option
, (unsigned)mode_nop
, (unsigned *)&__mf_opts
.mudflap_mode
},
322 "mudflaps populate object tree",
323 set_option
, (unsigned)mode_populate
, (unsigned *)&__mf_opts
.mudflap_mode
},
325 "mudflaps check for memory violations",
326 set_option
, (unsigned)mode_check
, (unsigned *)&__mf_opts
.mudflap_mode
},
328 "mudflaps always cause violations (diagnostic)",
329 set_option
, (unsigned)mode_violate
, (unsigned *)&__mf_opts
.mudflap_mode
},
332 "violations do not change program execution",
333 set_option
, (unsigned)viol_nop
, (unsigned *)&__mf_opts
.violation_mode
},
335 "violations cause a call to abort()",
336 set_option
, (unsigned)viol_abort
, (unsigned *)&__mf_opts
.violation_mode
},
338 "violations are promoted to SIGSEGV signals",
339 set_option
, (unsigned)viol_segv
, (unsigned *)&__mf_opts
.violation_mode
},
341 "violations fork a gdb process attached to current program",
342 set_option
, (unsigned)viol_gdb
, (unsigned *)&__mf_opts
.violation_mode
},
344 "trace calls to mudflap runtime library",
345 set_option
, 1, &__mf_opts
.trace_mf_calls
},
347 "trace internal events within mudflap runtime library",
348 set_option
, 1, &__mf_opts
.verbose_trace
},
350 "collect statistics on mudflap's operation",
351 set_option
, 1, &__mf_opts
.collect_stats
},
354 "print report upon SIGUSR1",
355 set_option
, 1, &__mf_opts
.sigusr1_report
},
357 {"internal-checking",
358 "perform more expensive internal checking",
359 set_option
, 1, &__mf_opts
.internal_checking
},
361 "print any memory leaks at program shutdown",
362 set_option
, 1, &__mf_opts
.print_leaks
},
363 {"check-initialization",
364 "detect uninitialized object reads",
365 set_option
, 1, &__mf_opts
.check_initialization
},
366 {"verbose-violations",
367 "print verbose messages when memory violations occur",
368 set_option
, 1, &__mf_opts
.verbose_violations
},
370 "abbreviate repetitive listings",
371 set_option
, 1, &__mf_opts
.abbreviate
},
373 "track object lifetime timestamps",
374 set_option
, 1, &__mf_opts
.timestamps
},
376 "ignore read accesses - assume okay",
377 set_option
, 1, &__mf_opts
.ignore_reads
},
379 "wipe stack objects at unwind",
380 set_option
, 1, &__mf_opts
.wipe_stack
},
382 "wipe heap objects at free",
383 set_option
, 1, &__mf_opts
.wipe_heap
},
385 "support /proc/self/map heuristics",
386 set_option
, 1, &__mf_opts
.heur_proc_map
},
388 "enable a simple upper stack bound heuristic",
389 set_option
, 1, &__mf_opts
.heur_stack_bound
},
391 "support _start.._end heuristics",
392 set_option
, 1, &__mf_opts
.heur_start_end
},
394 "register standard library data (argv, errno, stdin, ...)",
395 set_option
, 1, &__mf_opts
.heur_std_data
},
396 {"free-queue-length",
397 "queue N deferred free() calls before performing them",
398 read_integer_option
, 0, &__mf_opts
.free_queue_length
},
400 "keep a history of N unregistered regions",
401 read_integer_option
, 0, &__mf_opts
.persistent_count
},
403 "surround allocations with crumple zones of N bytes",
404 read_integer_option
, 0, &__mf_opts
.crumple_zone
},
405 /* XXX: not type-safe.
407 "set lookup cache size mask to N (2**M - 1)",
408 read_integer_option, 0, (int *)(&__mf_lc_mask)},
410 "set lookup cache pointer shift",
411 read_integer_option, 0, (int *)(&__mf_lc_shift)},
414 "adapt mask/shift parameters after N cache misses",
415 read_integer_option
, 1, &__mf_opts
.adapt_cache
},
417 "keep an N-level stack trace of each call context",
418 read_integer_option
, 0, &__mf_opts
.backtrace
},
421 "override thread stacks allocation: N kB",
422 read_integer_option
, 0, &__mf_opts
.thread_stack
},
424 {0, 0, set_option
, 0, NULL
}
433 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
434 "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
436 "The mudflap code can be controlled by an environment variable:\n"
438 "$ export MUDFLAP_OPTIONS='<options>'\n"
439 "$ <mudflapped_program>\n"
441 "where <options> is a space-separated list of \n"
442 "any of the following options. Use `-no-OPTION' to disable options.\n"
445 (threads_active_p
? "multi-threaded " : "single-threaded "),
455 /* XXX: The multi-threaded thread-unaware combination is bad. */
457 for (opt
= options
; opt
->name
; opt
++)
459 int default_p
= (opt
->value
== * opt
->target
);
465 fprintf (stderr
, "-%-23.23s %s", opt
->name
, opt
->description
);
467 fprintf (stderr
, " [active]\n");
469 fprintf (stderr
, "\n");
471 case read_integer_option
:
472 strncpy (buf
, opt
->name
, 128);
473 strncpy (buf
+ strlen (opt
->name
), "=N", 2);
474 fprintf (stderr
, "-%-23.23s %s", buf
, opt
->description
);
475 fprintf (stderr
, " [%d]\n", * opt
->target
);
481 fprintf (stderr
, "\n");
486 __mf_set_options (const char *optstr
)
490 BEGIN_RECURSION_PROTECT ();
491 rc
= __mfu_set_options (optstr
);
492 /* XXX: It's not really that easy. A change to a bunch of parameters
493 can require updating auxiliary state or risk crashing:
494 free_queue_length, crumple_zone ... */
495 END_RECURSION_PROTECT ();
502 __mfu_set_options (const char *optstr
)
504 struct option
*opts
= 0;
508 const char *saved_optstr
= optstr
;
510 /* XXX: bounds-check for optstr! */
527 if (*optstr
== '?' ||
528 strncmp (optstr
, "help", 4) == 0)
530 /* Caller will print help and exit. */
534 if (strncmp (optstr
, "no-", 3) == 0)
537 optstr
= & optstr
[3];
540 for (opts
= options
; opts
->name
; opts
++)
542 if (strncmp (optstr
, opts
->name
, strlen (opts
->name
)) == 0)
544 optstr
+= strlen (opts
->name
);
545 assert (opts
->target
);
552 *(opts
->target
) = opts
->value
;
554 case read_integer_option
:
555 if (! negate
&& (*optstr
== '=' && *(optstr
+1)))
558 tmp
= strtol (optstr
, &nxt
, 10);
559 if ((optstr
!= nxt
) && (tmp
!= LONG_MAX
))
562 *(opts
->target
) = (int)tmp
;
576 "warning: unrecognized string '%s' in mudflap options\n",
578 optstr
+= strlen (optstr
);
584 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
585 __mf_lc_mask
&= (LOOKUP_CACHE_SIZE_MAX
- 1);
586 __mf_opts
.free_queue_length
&= (__MF_FREEQ_MAX
- 1);
588 /* Clear the lookup cache, in case the parameters got changed. */
590 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
592 __mf_lookup_cache
[0].low
= MAXPTR
;
594 TRACE ("set options from `%s'\n", saved_optstr
);
596 /* Call this unconditionally, in case -sigusr1-report was toggled. */
597 __mf_sigusr1_respond ();
606 __mf_resolve_single_dynamic (struct __mf_dynamic_entry
*e
)
611 if (e
->pointer
) return;
614 if (e
->version
!= NULL
&& e
->version
[0] != '\0') /* non-null/empty */
615 e
->pointer
= dlvsym (RTLD_NEXT
, e
->name
, e
->version
);
618 e
->pointer
= dlsym (RTLD_NEXT
, e
->name
);
624 fprintf (stderr
, "mf: error in dlsym(\"%s\"): %s\n",
630 fprintf (stderr
, "mf: dlsym(\"%s\") = NULL\n", e
->name
);
637 __mf_resolve_dynamics ()
640 for (i
= 0; i
< dyn_INITRESOLVE
; i
++)
641 __mf_resolve_single_dynamic (& __mf_dynamic
[i
]);
645 /* NB: order must match enums in mf-impl.h */
646 struct __mf_dynamic_entry __mf_dynamic
[] =
648 {NULL
, "calloc", NULL
},
649 {NULL
, "free", NULL
},
650 {NULL
, "malloc", NULL
},
651 {NULL
, "mmap", NULL
},
652 {NULL
, "munmap", NULL
},
653 {NULL
, "realloc", NULL
},
654 {NULL
, "DUMMY", NULL
}, /* dyn_INITRESOLVE */
656 {NULL
, "pthread_create", PTHREAD_CREATE_VERSION
},
657 {NULL
, "pthread_join", NULL
},
658 {NULL
, "pthread_exit", NULL
}
666 /* ------------------------------------------------------------------------ */
668 /* Lookup & manage automatic initialization of the five or so splay trees. */
670 __mf_object_tree (int type
)
672 static mfsplay_tree trees
[__MF_TYPE_MAX
+1];
673 assert (type
>= 0 && type
<= __MF_TYPE_MAX
);
674 if (UNLIKELY (trees
[type
] == NULL
))
675 trees
[type
] = mfsplay_tree_new ();
685 /* Return if initialization has already been done. */
686 if (LIKELY (__mf_starting_p
== 0))
689 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
691 __mf_resolve_dynamics ();
695 __mf_set_default_options ();
697 ov
= getenv ("MUDFLAP_OPTIONS");
700 int rc
= __mfu_set_options (ov
);
708 /* Initialize to a non-zero description epoch. */
709 __mf_describe_object (NULL
);
711 #define REG_RESERVED(obj) \
712 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
714 REG_RESERVED (__mf_lookup_cache
);
715 REG_RESERVED (__mf_lc_mask
);
716 REG_RESERVED (__mf_lc_shift
);
717 /* XXX: others of our statics? */
719 /* Prevent access to *NULL. */
720 __mf_register (MINPTR
, 1, __MF_TYPE_NOACCESS
, "NULL");
721 __mf_lookup_cache
[0].low
= (uintptr_t) -1;
727 __wrap_main (int argc
, char* argv
[])
729 extern char **environ
;
731 extern int __real_main ();
732 static int been_here
= 0;
734 if (__mf_opts
.heur_std_data
&& ! been_here
)
739 __mf_register (argv
, sizeof(char *)*(argc
+1), __MF_TYPE_STATIC
, "argv[]");
740 for (i
=0; i
<argc
; i
++)
742 unsigned j
= strlen (argv
[i
]);
743 __mf_register (argv
[i
], j
+1, __MF_TYPE_STATIC
, "argv element");
748 char *e
= environ
[i
];
750 if (e
== NULL
) break;
751 j
= strlen (environ
[i
]);
752 __mf_register (environ
[i
], j
+1, __MF_TYPE_STATIC
, "environ element");
754 __mf_register (environ
, sizeof(char *)*(i
+1), __MF_TYPE_STATIC
, "environ[]");
756 __mf_register (& errno
, sizeof (errno
), __MF_TYPE_STATIC
, "errno area");
758 __mf_register (stdin
, sizeof (*stdin
), __MF_TYPE_STATIC
, "stdin");
759 __mf_register (stdout
, sizeof (*stdout
), __MF_TYPE_STATIC
, "stdout");
760 __mf_register (stderr
, sizeof (*stderr
), __MF_TYPE_STATIC
, "stderr");
762 /* Make some effort to register ctype.h static arrays. */
763 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
764 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
765 with in mf-hooks2.c. */
769 return main (argc
, argv
, environ
);
771 return __real_main (argc
, argv
, environ
);
777 extern void __mf_fini () DTOR
;
780 TRACE ("__mf_fini\n");
784 /* Since we didn't populate the tree for allocations in constructors
785 before __mf_init, we cannot check destructors after __mf_fini. */
786 __mf_opts
.mudflap_mode
= mode_nop
;
792 /* ------------------------------------------------------------------------ */
795 void __mf_check (void *ptr
, size_t sz
, int type
, const char *location
)
798 BEGIN_RECURSION_PROTECT ();
799 __mfu_check (ptr
, sz
, type
, location
);
800 END_RECURSION_PROTECT ();
805 void __mfu_check (void *ptr
, size_t sz
, int type
, const char *location
)
807 unsigned entry_idx
= __MF_CACHE_INDEX (ptr
);
808 struct __mf_cache
*entry
= & __mf_lookup_cache
[entry_idx
];
809 int judgement
= 0; /* 0=undecided; <0=violation; >0=okay */
810 uintptr_t ptr_low
= (uintptr_t) ptr
;
811 uintptr_t ptr_high
= CLAMPSZ (ptr
, sz
);
812 struct __mf_cache old_entry
= *entry
;
814 if (UNLIKELY (__mf_opts
.sigusr1_report
))
815 __mf_sigusr1_respond ();
817 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
818 ptr
, entry_idx
, (unsigned long)sz
,
819 (type
== 0 ? "read" : "write"), location
);
821 switch (__mf_opts
.mudflap_mode
)
824 /* It is tempting to poison the cache here similarly to
825 mode_populate. However that eliminates a valuable
826 distinction between these two modes. mode_nop is useful to
827 let a user count & trace every single check / registration
828 call. mode_populate is useful to let a program run fast
835 entry
->low
= ptr_low
;
836 entry
->high
= ptr_high
;
842 unsigned heuristics
= 0;
844 /* Advance aging/adaptation counters. */
845 static unsigned adapt_count
;
847 if (UNLIKELY (__mf_opts
.adapt_cache
> 0 &&
848 adapt_count
> __mf_opts
.adapt_cache
))
854 /* Looping only occurs if heuristics were triggered. */
855 while (judgement
== 0)
857 DECLARE (void, free
, void *p
);
858 __mf_object_t
* ovr_obj
[1];
860 __mf_object_t
** all_ovr_obj
= NULL
;
861 __mf_object_t
** dealloc_me
= NULL
;
864 /* Find all overlapping objects. Be optimistic that there is just one. */
865 obj_count
= __mf_find_objects (ptr_low
, ptr_high
, ovr_obj
, 1);
866 if (UNLIKELY (obj_count
> 1))
868 /* Allocate a real buffer and do the search again. */
869 DECLARE (void *, malloc
, size_t c
);
871 all_ovr_obj
= CALL_REAL (malloc
, (sizeof (__mf_object_t
*) *
873 if (all_ovr_obj
== NULL
) abort ();
874 n
= __mf_find_objects (ptr_low
, ptr_high
, all_ovr_obj
, obj_count
);
875 assert (n
== obj_count
);
876 dealloc_me
= all_ovr_obj
;
880 all_ovr_obj
= ovr_obj
;
884 /* Update object statistics. */
885 for (i
= 0; i
< obj_count
; i
++)
887 __mf_object_t
*obj
= all_ovr_obj
[i
];
888 assert (obj
!= NULL
);
889 if (type
== __MF_CHECK_READ
)
896 /* Iterate over the various objects. There are a number of special cases. */
897 for (i
= 0; i
< obj_count
; i
++)
899 __mf_object_t
*obj
= all_ovr_obj
[i
];
901 /* Any __MF_TYPE_NOACCESS hit is bad. */
902 if (UNLIKELY (obj
->type
== __MF_TYPE_NOACCESS
))
905 /* Any object with a watch flag is bad. */
906 if (UNLIKELY (obj
->watching_p
))
907 judgement
= -2; /* trigger VIOL_WATCH */
909 /* A read from an uninitialized object is bad. */
910 if (UNLIKELY (__mf_opts
.check_initialization
912 && type
== __MF_CHECK_READ
914 && obj
->write_count
== 0
915 /* uninitialized (heap) */
916 && obj
->type
== __MF_TYPE_HEAP
))
920 /* We now know that the access spans one or more valid objects. */
921 if (LIKELY (judgement
>= 0))
922 for (i
= 0; i
< obj_count
; i
++)
924 __mf_object_t
*obj
= all_ovr_obj
[i
];
926 /* Is this access entirely contained within this object? */
927 if (LIKELY (ptr_low
>= obj
->low
&& ptr_high
<= obj
->high
))
930 entry
->low
= obj
->low
;
931 entry
->high
= obj
->high
;
935 /* XXX: Access runs off left or right side of this
936 object. That could be okay, if there are
937 other objects that fill in all the holes. */
940 if (dealloc_me
!= NULL
)
941 CALL_REAL (free
, dealloc_me
);
943 /* If the judgment is still unknown at this stage, loop
944 around at most one more time. */
947 if (heuristics
++ < 2) /* XXX parametrize this number? */
948 judgement
= __mf_heuristic_check (ptr_low
, ptr_high
);
962 if (__mf_opts
.collect_stats
)
966 if (LIKELY (old_entry
.low
!= entry
->low
|| old_entry
.high
!= entry
->high
))
967 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
968 __mf_lookup_cache_reusecount
[entry_idx
] ++;
971 if (UNLIKELY (judgement
< 0))
972 __mf_violation (ptr
, sz
,
973 (uintptr_t) __builtin_return_address (0), location
,
975 (type
== __MF_CHECK_READ
? __MF_VIOL_READ
: __MF_VIOL_WRITE
) :
980 static __mf_object_t
*
981 __mf_insert_new_object (uintptr_t low
, uintptr_t high
, int type
,
982 const char *name
, uintptr_t pc
)
984 DECLARE (void *, calloc
, size_t c
, size_t n
);
986 __mf_object_t
*new_obj
;
987 new_obj
= CALL_REAL (calloc
, 1, sizeof(__mf_object_t
));
989 new_obj
->high
= high
;
990 new_obj
->type
= type
;
991 new_obj
->name
= name
;
992 new_obj
->alloc_pc
= pc
;
993 #if HAVE_GETTIMEOFDAY
994 if (__mf_opts
.timestamps
)
995 gettimeofday (& new_obj
->alloc_time
, NULL
);
998 new_obj
->alloc_thread
= pthread_self ();
1001 if (__mf_opts
.backtrace
> 0 && (type
== __MF_TYPE_HEAP
|| type
== __MF_TYPE_HEAP_I
))
1002 new_obj
->alloc_backtrace_size
=
1003 __mf_backtrace (& new_obj
->alloc_backtrace
,
1006 __mf_link_object (new_obj
);
1012 __mf_uncache_object (__mf_object_t
*old_obj
)
1014 /* Remove any low/high pointers for this object from the lookup cache. */
1016 /* Can it possibly exist in the cache? */
1017 if (LIKELY (old_obj
->read_count
+ old_obj
->write_count
))
1019 uintptr_t low
= old_obj
->low
;
1020 uintptr_t high
= old_obj
->high
;
1021 unsigned idx_low
= __MF_CACHE_INDEX (low
);
1022 unsigned idx_high
= __MF_CACHE_INDEX (high
);
1024 for (i
= idx_low
; i
<= idx_high
; i
++)
1026 struct __mf_cache
*entry
= & __mf_lookup_cache
[i
];
1027 /* NB: the "||" in the following test permits this code to
1028 tolerate the situation introduced by __mf_check over
1029 contiguous objects, where a cache entry spans several
1031 if (entry
->low
== low
|| entry
->high
== high
)
1033 entry
->low
= MAXPTR
;
1034 entry
->high
= MINPTR
;
1042 __mf_register (void *ptr
, size_t sz
, int type
, const char *name
)
1045 BEGIN_RECURSION_PROTECT ();
1046 __mfu_register (ptr
, sz
, type
, name
);
1047 END_RECURSION_PROTECT ();
1053 __mfu_register (void *ptr
, size_t sz
, int type
, const char *name
)
1055 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1056 ptr
, (unsigned long) sz
, type
, name
? name
: "");
1058 if (__mf_opts
.collect_stats
)
1060 __mf_count_register
++;
1061 __mf_total_register_size
[(type
< 0) ? 0 :
1062 (type
> __MF_TYPE_MAX
) ? 0 :
1066 if (UNLIKELY (__mf_opts
.sigusr1_report
))
1067 __mf_sigusr1_respond ();
1069 switch (__mf_opts
.mudflap_mode
)
1075 __mf_violation (ptr
, sz
, (uintptr_t) __builtin_return_address (0), NULL
,
1076 __MF_VIOL_REGISTER
);
1080 /* Clear the cache. */
1081 /* XXX: why the entire cache? */
1083 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
1085 __mf_lookup_cache
[0].low
= MAXPTR
;
1090 __mf_object_t
*ovr_objs
[1];
1091 unsigned num_overlapping_objs
;
1092 uintptr_t low
= (uintptr_t) ptr
;
1093 uintptr_t high
= CLAMPSZ (ptr
, sz
);
1094 uintptr_t pc
= (uintptr_t) __builtin_return_address (0);
1096 /* Treat unknown size indication as 1. */
1097 if (UNLIKELY (sz
== 0)) sz
= 1;
1099 /* Look for objects only of the same type. This will e.g. permit a registration
1100 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1101 __mf_check time however harmful overlaps will be detected. */
1102 num_overlapping_objs
= __mf_find_objects2 (low
, high
, ovr_objs
, 1, type
);
1104 /* Handle overlaps. */
1105 if (UNLIKELY (num_overlapping_objs
> 0))
1107 __mf_object_t
*ovr_obj
= ovr_objs
[0];
1109 /* Accept certain specific duplication pairs. */
1110 if (((type
== __MF_TYPE_STATIC
) || (type
== __MF_TYPE_GUESS
))
1111 && ovr_obj
->low
== low
1112 && ovr_obj
->high
== high
1113 && ovr_obj
->type
== type
)
1115 /* Duplicate registration for static objects may come
1116 from distinct compilation units. */
1117 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1118 (void *) low
, (void *) high
,
1119 (ovr_obj
->name
? ovr_obj
->name
: ""));
1123 /* Alas, a genuine violation. */
1126 /* Two or more *real* mappings here. */
1127 __mf_violation ((void *) ptr
, sz
,
1128 (uintptr_t) __builtin_return_address (0), NULL
,
1129 __MF_VIOL_REGISTER
);
1132 else /* No overlapping objects: AOK. */
1133 __mf_insert_new_object (low
, high
, type
, name
, pc
);
1135 /* We could conceivably call __mf_check() here to prime the cache,
1136 but then the read_count/write_count field is not reliable. */
1139 } /* end switch (__mf_opts.mudflap_mode) */
1144 __mf_unregister (void *ptr
, size_t sz
, int type
)
1147 BEGIN_RECURSION_PROTECT ();
1148 __mfu_unregister (ptr
, sz
, type
);
1149 END_RECURSION_PROTECT ();
1155 __mfu_unregister (void *ptr
, size_t sz
, int type
)
1157 DECLARE (void, free
, void *ptr
);
1159 if (UNLIKELY (__mf_opts
.sigusr1_report
))
1160 __mf_sigusr1_respond ();
1162 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr
, (unsigned long) sz
, type
);
1164 switch (__mf_opts
.mudflap_mode
)
1170 __mf_violation (ptr
, sz
,
1171 (uintptr_t) __builtin_return_address (0), NULL
,
1172 __MF_VIOL_UNREGISTER
);
1176 /* Clear the cache. */
1178 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
1180 __mf_lookup_cache
[0].low
= MAXPTR
;
1185 __mf_object_t
*old_obj
= NULL
;
1186 __mf_object_t
*del_obj
= NULL
; /* Object to actually delete. */
1187 __mf_object_t
*objs
[1] = {NULL
};
1188 unsigned num_overlapping_objs
;
1190 num_overlapping_objs
= __mf_find_objects2 ((uintptr_t) ptr
,
1191 CLAMPSZ (ptr
, sz
), objs
, 1, type
);
1193 /* Special case for HEAP_I - see free & realloc hook. They don't
1194 know whether the input region was HEAP or HEAP_I before
1195 unmapping it. Here we give HEAP a try in case HEAP_I
1197 if ((type
== __MF_TYPE_HEAP_I
) && (num_overlapping_objs
== 0))
1199 num_overlapping_objs
= __mf_find_objects2 ((uintptr_t) ptr
,
1200 CLAMPSZ (ptr
, sz
), objs
, 1, __MF_TYPE_HEAP
);
1204 if (UNLIKELY ((num_overlapping_objs
!= 1) /* more than one overlap */
1205 || ((sz
== 0) ? 0 : (sz
!= (old_obj
->high
- old_obj
->low
+ 1))) /* size mismatch */
1206 || ((uintptr_t) ptr
!= old_obj
->low
))) /* base mismatch */
1208 __mf_violation (ptr
, sz
,
1209 (uintptr_t) __builtin_return_address (0), NULL
,
1210 __MF_VIOL_UNREGISTER
);
1214 __mf_unlink_object (old_obj
);
1215 __mf_uncache_object (old_obj
);
1217 /* Wipe buffer contents if desired. */
1218 if ((__mf_opts
.wipe_stack
&& old_obj
->type
== __MF_TYPE_STACK
)
1219 || (__mf_opts
.wipe_heap
&& (old_obj
->type
== __MF_TYPE_HEAP
1220 || old_obj
->type
== __MF_TYPE_HEAP_I
)))
1222 memset ((void *) old_obj
->low
,
1224 (size_t) (old_obj
->high
- old_obj
->low
+ 1));
1227 /* Manage the object cemetary. */
1228 if (__mf_opts
.persistent_count
> 0 &&
1229 old_obj
->type
>= 0 &&
1230 old_obj
->type
<= __MF_TYPE_MAX_CEM
)
1232 old_obj
->deallocated_p
= 1;
1233 old_obj
->dealloc_pc
= (uintptr_t) __builtin_return_address (0);
1234 #if HAVE_GETTIMEOFDAY
1235 if (__mf_opts
.timestamps
)
1236 gettimeofday (& old_obj
->dealloc_time
, NULL
);
1239 old_obj
->dealloc_thread
= pthread_self ();
1242 if (__mf_opts
.backtrace
> 0 && old_obj
->type
== __MF_TYPE_HEAP
)
1243 old_obj
->dealloc_backtrace_size
=
1244 __mf_backtrace (& old_obj
->dealloc_backtrace
,
1247 /* Encourage this object to be displayed again in current epoch. */
1248 old_obj
->description_epoch
--;
1250 /* Put this object into the cemetary. This may require this plot to
1251 be recycled, and the previous resident to be designated del_obj. */
1253 unsigned row
= old_obj
->type
;
1254 unsigned plot
= __mf_object_dead_head
[row
];
1256 del_obj
= __mf_object_cemetary
[row
][plot
];
1257 __mf_object_cemetary
[row
][plot
] = old_obj
;
1259 if (plot
== __mf_opts
.persistent_count
) plot
= 0;
1260 __mf_object_dead_head
[row
] = plot
;
1266 if (__mf_opts
.print_leaks
)
1268 if ((old_obj
->read_count
+ old_obj
->write_count
) == 0 &&
1269 (old_obj
->type
== __MF_TYPE_HEAP
1270 || old_obj
->type
== __MF_TYPE_HEAP_I
))
1274 "mudflap warning: unaccessed registered object:\n");
1275 __mf_describe_object (old_obj
);
1279 if (del_obj
!= NULL
) /* May or may not equal old_obj. */
1281 if (__mf_opts
.backtrace
> 0)
1283 CALL_REAL(free
, del_obj
->alloc_backtrace
);
1284 if (__mf_opts
.persistent_count
> 0)
1286 CALL_REAL(free
, del_obj
->dealloc_backtrace
);
1289 CALL_REAL(free
, del_obj
);
1294 } /* end switch (__mf_opts.mudflap_mode) */
1297 if (__mf_opts
.collect_stats
)
1299 __mf_count_unregister
++;
1300 __mf_total_unregister_size
+= sz
;
1309 unsigned long total_size
;
1310 unsigned live_obj_count
;
1311 double total_weight
;
1312 double weighted_size
;
1313 unsigned long weighted_address_bits
[sizeof (uintptr_t) * 8][2];
1319 __mf_adapt_cache_fn (mfsplay_tree_node n
, void *param
)
1321 __mf_object_t
*obj
= (__mf_object_t
*) n
->value
;
1322 struct tree_stats
*s
= (struct tree_stats
*) param
;
1324 assert (obj
!= NULL
&& s
!= NULL
);
1326 /* Exclude never-accessed objects. */
1327 if (obj
->read_count
+ obj
->write_count
)
1330 s
->total_size
+= (obj
->high
- obj
->low
+ 1);
1337 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1338 (void *) obj->low, obj->liveness, obj->name); */
1340 s
->live_obj_count
++;
1341 s
->total_weight
+= (double) obj
->liveness
;
1343 (double) (obj
->high
- obj
->low
+ 1) *
1344 (double) obj
->liveness
;
1347 for (i
=0; i
<sizeof(uintptr_t) * 8; i
++)
1349 unsigned bit
= addr
& 1;
1350 s
->weighted_address_bits
[i
][bit
] += obj
->liveness
;
1354 /* Age the liveness value. */
1355 obj
->liveness
>>= 1;
1366 struct tree_stats s
;
1367 uintptr_t new_mask
= 0;
1368 unsigned char new_shift
;
1369 float cache_utilization
;
1371 static float smoothed_new_shift
= -1.0;
1374 memset (&s
, 0, sizeof (s
));
1376 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP
), __mf_adapt_cache_fn
, (void *) & s
);
1377 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I
), __mf_adapt_cache_fn
, (void *) & s
);
1378 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK
), __mf_adapt_cache_fn
, (void *) & s
);
1379 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC
), __mf_adapt_cache_fn
, (void *) & s
);
1380 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS
), __mf_adapt_cache_fn
, (void *) & s
);
1382 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1383 empty tree. Just leave the cache alone in such cases, rather
1384 than risk dying by division-by-zero. */
1385 if (! (s
.obj_count
> 0) && (s
.live_obj_count
> 0) && (s
.total_weight
> 0.0))
1388 /* Guess a good value for the shift parameter by finding an address bit that is a
1389 good discriminant of lively objects. */
1391 for (i
=0; i
<sizeof (uintptr_t)*8; i
++)
1393 float value
= (float) s
.weighted_address_bits
[i
][0] * (float) s
.weighted_address_bits
[i
][1];
1394 if (max_value
< value
) max_value
= value
;
1396 for (i
=0; i
<sizeof (uintptr_t)*8; i
++)
1398 float shoulder_factor
= 0.7; /* Include slightly less popular bits too. */
1399 float value
= (float) s
.weighted_address_bits
[i
][0] * (float) s
.weighted_address_bits
[i
][1];
1400 if (value
>= max_value
* shoulder_factor
)
1403 if (smoothed_new_shift
< 0) smoothed_new_shift
= __mf_lc_shift
;
1404 /* Converge toward this slowly to reduce flapping. */
1405 smoothed_new_shift
= 0.9*smoothed_new_shift
+ 0.1*i
;
1406 new_shift
= (unsigned) (smoothed_new_shift
+ 0.5);
1407 assert (new_shift
< sizeof (uintptr_t)*8);
1409 /* Count number of used buckets. */
1410 cache_utilization
= 0.0;
1411 for (i
= 0; i
< (1 + __mf_lc_mask
); i
++)
1412 if (__mf_lookup_cache
[i
].low
!= 0 || __mf_lookup_cache
[i
].high
!= 0)
1413 cache_utilization
+= 1.0;
1414 cache_utilization
/= (1 + __mf_lc_mask
);
1416 new_mask
|= 0x3ff; /* XXX: force a large cache. */
1417 new_mask
&= (LOOKUP_CACHE_SIZE_MAX
- 1);
1419 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1420 "util=%u%% m=%p s=%u\n",
1421 s
.obj_count
, s
.live_obj_count
, s
.total_size
, s
.total_weight
, s
.weighted_size
,
1422 (unsigned)(cache_utilization
*100.0), (void *) new_mask
, new_shift
);
1424 /* We should reinitialize cache if its parameters have changed. */
1425 if (new_mask
!= __mf_lc_mask
||
1426 new_shift
!= __mf_lc_shift
)
1428 __mf_lc_mask
= new_mask
;
1429 __mf_lc_shift
= new_shift
;
1431 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
1433 __mf_lookup_cache
[0].low
= MAXPTR
;
1439 /* __mf_find_object[s] */
1441 /* Find overlapping live objecs between [low,high]. Return up to
1442 max_objs of their pointers in objs[]. Return total count of
1443 overlaps (may exceed max_objs). */
1446 __mf_find_objects2 (uintptr_t ptr_low
, uintptr_t ptr_high
,
1447 __mf_object_t
**objs
, unsigned max_objs
, int type
)
1450 mfsplay_tree t
= __mf_object_tree (type
);
1451 mfsplay_tree_key k
= (mfsplay_tree_key
) ptr_low
;
1454 mfsplay_tree_node n
= mfsplay_tree_lookup (t
, k
);
1455 /* An exact match for base address implies a hit. */
1458 if (count
< max_objs
)
1459 objs
[count
] = (__mf_object_t
*) n
->value
;
1463 /* Iterate left then right near this key value to find all overlapping objects. */
1464 for (direction
= 0; direction
< 2; direction
++)
1466 /* Reset search origin. */
1467 k
= (mfsplay_tree_key
) ptr_low
;
1473 n
= (direction
== 0 ? mfsplay_tree_successor (t
, k
) : mfsplay_tree_predecessor (t
, k
));
1474 if (n
== NULL
) break;
1475 obj
= (__mf_object_t
*) n
->value
;
1477 if (! (obj
->low
<= ptr_high
&& obj
->high
>= ptr_low
)) /* No overlap? */
1480 if (count
< max_objs
)
1481 objs
[count
] = (__mf_object_t
*) n
->value
;
1484 k
= (mfsplay_tree_key
) obj
->low
;
1493 __mf_find_objects (uintptr_t ptr_low
, uintptr_t ptr_high
,
1494 __mf_object_t
**objs
, unsigned max_objs
)
1499 /* Search each splay tree for overlaps. */
1500 for (type
= __MF_TYPE_NOACCESS
; type
<= __MF_TYPE_GUESS
; type
++)
1502 unsigned c
= __mf_find_objects2 (ptr_low
, ptr_high
, objs
, max_objs
, type
);
1508 else /* NB: C may equal 0 */
1521 /* __mf_link_object */
1524 __mf_link_object (__mf_object_t
*node
)
1526 mfsplay_tree t
= __mf_object_tree (node
->type
);
1527 mfsplay_tree_insert (t
, (mfsplay_tree_key
) node
->low
, (mfsplay_tree_value
) node
);
1530 /* __mf_unlink_object */
1533 __mf_unlink_object (__mf_object_t
*node
)
1535 mfsplay_tree t
= __mf_object_tree (node
->type
);
1536 mfsplay_tree_remove (t
, (mfsplay_tree_key
) node
->low
);
1539 /* __mf_find_dead_objects */
1541 /* Find overlapping dead objecs between [low,high]. Return up to
1542 max_objs of their pointers in objs[]. Return total count of
1543 overlaps (may exceed max_objs). */
1546 __mf_find_dead_objects (uintptr_t low
, uintptr_t high
,
1547 __mf_object_t
**objs
, unsigned max_objs
)
1549 if (__mf_opts
.persistent_count
> 0)
1552 unsigned recollection
= 0;
1555 assert (low
<= high
);
1556 assert (max_objs
== 0 || objs
!= NULL
);
1558 /* Widen the search from the most recent plots in each row, looking
1559 backward in time. */
1561 while (recollection
< __mf_opts
.persistent_count
)
1565 for (row
= 0; row
<= __MF_TYPE_MAX_CEM
; row
++)
1570 plot
= __mf_object_dead_head
[row
];
1571 for (i
= 0; i
<= recollection
; i
++)
1575 /* Look backward through row: it's a circular buffer. */
1576 if (plot
> 0) plot
--;
1577 else plot
= __mf_opts
.persistent_count
- 1;
1579 obj
= __mf_object_cemetary
[row
][plot
];
1580 if (obj
&& obj
->low
<= high
&& obj
->high
>= low
)
1582 /* Found an overlapping dead object! */
1583 if (count
< max_objs
)
1593 /* Look farther back in time. */
1594 recollection
= (recollection
* 2) + 1;
1603 /* __mf_describe_object */
1606 __mf_describe_object (__mf_object_t
*obj
)
1608 static unsigned epoch
= 0;
1615 if (__mf_opts
.abbreviate
&& obj
->description_epoch
== epoch
)
1618 "mudflap %sobject %p: name=`%s'\n",
1619 (obj
->deallocated_p
? "dead " : ""),
1620 (void *) obj
, (obj
->name
? obj
->name
: ""));
1624 obj
->description_epoch
= epoch
;
1627 "mudflap %sobject %p: name=`%s'\n"
1628 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1629 "alloc time=%lu.%06lu pc=%p"
1634 (obj
->deallocated_p
? "dead " : ""),
1635 (void *) obj
, (obj
->name
? obj
->name
: ""),
1636 (void *) obj
->low
, (void *) obj
->high
,
1637 (unsigned long) (obj
->high
- obj
->low
+ 1),
1638 (obj
->type
== __MF_TYPE_NOACCESS
? "no-access" :
1639 obj
->type
== __MF_TYPE_HEAP
? "heap" :
1640 obj
->type
== __MF_TYPE_HEAP_I
? "heap-init" :
1641 obj
->type
== __MF_TYPE_STACK
? "stack" :
1642 obj
->type
== __MF_TYPE_STATIC
? "static" :
1643 obj
->type
== __MF_TYPE_GUESS
? "guess" :
1645 obj
->read_count
, obj
->write_count
, obj
->liveness
,
1646 obj
->watching_p
? " watching" : "",
1647 obj
->alloc_time
.tv_sec
, obj
->alloc_time
.tv_usec
,
1648 (void *) obj
->alloc_pc
1650 , (unsigned) obj
->alloc_thread
1654 if (__mf_opts
.backtrace
> 0)
1657 for (i
=0; i
<obj
->alloc_backtrace_size
; i
++)
1658 fprintf (stderr
, " %s\n", obj
->alloc_backtrace
[i
]);
1661 if (__mf_opts
.persistent_count
> 0)
1663 if (obj
->deallocated_p
)
1665 fprintf (stderr
, "dealloc time=%lu.%06lu pc=%p"
1670 obj
->dealloc_time
.tv_sec
, obj
->dealloc_time
.tv_usec
,
1671 (void *) obj
->dealloc_pc
1673 , (unsigned) obj
->dealloc_thread
1678 if (__mf_opts
.backtrace
> 0)
1681 for (i
=0; i
<obj
->dealloc_backtrace_size
; i
++)
1682 fprintf (stderr
, " %s\n", obj
->dealloc_backtrace
[i
]);
1690 __mf_report_leaks_fn (mfsplay_tree_node n
, void *param
)
1692 __mf_object_t
*node
= (__mf_object_t
*) n
->value
;
1693 unsigned *count
= (unsigned *) param
;
1698 fprintf (stderr
, "Leaked object %u:\n", (*count
));
1699 __mf_describe_object (node
);
1706 __mf_report_leaks ()
1710 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP
),
1711 __mf_report_leaks_fn
, & count
);
1712 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I
),
1713 __mf_report_leaks_fn
, & count
);
1718 /* ------------------------------------------------------------------------ */
1725 BEGIN_RECURSION_PROTECT ();
1727 END_RECURSION_PROTECT ();
1734 if (__mf_opts
.collect_stats
)
1739 "calls to __mf_check: %lu\n"
1740 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1741 " __mf_unregister: %lu [%luB]\n"
1742 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1744 __mf_count_register
,
1745 __mf_total_register_size
[0], __mf_total_register_size
[1],
1746 __mf_total_register_size
[2], __mf_total_register_size
[3],
1747 __mf_total_register_size
[4], /* XXX */
1748 __mf_count_unregister
, __mf_total_unregister_size
,
1749 __mf_count_violation
[0], __mf_count_violation
[1],
1750 __mf_count_violation
[2], __mf_count_violation
[3],
1751 __mf_count_violation
[4]);
1754 "calls with reentrancy: %lu\n", __mf_reentrancy
);
1757 " lock contention: %lu\n", __mf_lock_contention
);
1760 /* Lookup cache stats. */
1763 unsigned max_reuse
= 0;
1764 unsigned num_used
= 0;
1765 unsigned num_unused
= 0;
1767 for (i
= 0; i
< LOOKUP_CACHE_SIZE
; i
++)
1769 if (__mf_lookup_cache_reusecount
[i
])
1773 if (max_reuse
< __mf_lookup_cache_reusecount
[i
])
1774 max_reuse
= __mf_lookup_cache_reusecount
[i
];
1776 fprintf (stderr
, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1777 num_used
, num_unused
, max_reuse
);
1781 unsigned live_count
;
1782 live_count
= __mf_find_objects (MINPTR
, MAXPTR
, NULL
, 0);
1783 fprintf (stderr
, "number of live objects: %u\n", live_count
);
1786 if (__mf_opts
.persistent_count
> 0)
1788 unsigned dead_count
= 0;
1790 for (row
= 0; row
<= __MF_TYPE_MAX_CEM
; row
++)
1791 for (plot
= 0 ; plot
< __mf_opts
.persistent_count
; plot
++)
1792 if (__mf_object_cemetary
[row
][plot
] != 0)
1794 fprintf (stderr
, " zombie objects: %u\n", dead_count
);
1797 if (__mf_opts
.print_leaks
&& (__mf_opts
.mudflap_mode
== mode_check
))
1800 extern void * __mf_wrap_alloca_indirect (size_t c
);
1802 /* Free up any remaining alloca()'d blocks. */
1803 __mf_wrap_alloca_indirect (0);
1804 __mf_describe_object (NULL
); /* Reset description epoch. */
1805 l
= __mf_report_leaks ();
1806 fprintf (stderr
, "number of leaked objects: %u\n", l
);
1810 /* __mf_backtrace */
1813 __mf_backtrace (char ***symbols
, void *guess_pc
, unsigned guess_omit_levels
)
1816 unsigned pc_array_size
= __mf_opts
.backtrace
+ guess_omit_levels
;
1817 unsigned remaining_size
;
1818 unsigned omitted_size
= 0;
1820 DECLARE (void, free
, void *ptr
);
1821 DECLARE (void *, calloc
, size_t c
, size_t n
);
1822 DECLARE (void *, malloc
, size_t n
);
1824 pc_array
= CALL_REAL (calloc
, pc_array_size
, sizeof (void *) );
1825 #ifdef HAVE_BACKTRACE
1826 pc_array_size
= backtrace (pc_array
, pc_array_size
);
1828 #define FETCH(n) do { if (pc_array_size >= n) { \
1829 pc_array[n] = __builtin_return_address(n); \
1830 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1832 /* Unroll some calls __builtin_return_address because this function
1833 only takes a literal integer parameter. */
1836 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1837 rather than simply returning 0. :-( */
1846 if (pc_array_size
> 8) pc_array_size
= 9;
1848 if (pc_array_size
> 0) pc_array_size
= 1;
1854 /* We want to trim the first few levels of the stack traceback,
1855 since they contain libmudflap wrappers and junk. If pc_array[]
1856 ends up containing a non-NULL guess_pc, then trim everything
1857 before that. Otherwise, omit the first guess_omit_levels
1860 if (guess_pc
!= NULL
)
1861 for (i
=0; i
<pc_array_size
; i
++)
1862 if (pc_array
[i
] == guess_pc
)
1865 if (omitted_size
== 0) /* No match? */
1866 if (pc_array_size
> guess_omit_levels
)
1867 omitted_size
= guess_omit_levels
;
1869 remaining_size
= pc_array_size
- omitted_size
;
1871 #ifdef HAVE_BACKTRACE_SYMBOLS
1872 *symbols
= backtrace_symbols (pc_array
+ omitted_size
, remaining_size
);
1875 /* Let's construct a buffer by hand. It will have <remaining_size>
1876 char*'s at the front, pointing at individual strings immediately
1881 enum { perline
= 30 };
1882 buffer
= CALL_REAL (malloc
, remaining_size
* (perline
+ sizeof(char *)));
1883 pointers
= (char **) buffer
;
1884 chars
= (char *)buffer
+ (remaining_size
* sizeof (char *));
1885 for (i
= 0; i
< remaining_size
; i
++)
1887 pointers
[i
] = chars
;
1888 sprintf (chars
, "[0x%p]", pc_array
[omitted_size
+ i
]);
1889 chars
= chars
+ perline
;
1891 *symbols
= pointers
;
1894 CALL_REAL (free
, pc_array
);
1896 return remaining_size
;
1899 /* ------------------------------------------------------------------------ */
1900 /* __mf_violation */
1903 __mf_violation (void *ptr
, size_t sz
, uintptr_t pc
,
1904 const char *location
, int type
)
1907 static unsigned violation_number
;
1908 DECLARE(void, free
, void *ptr
);
1910 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1912 (location
!= NULL
? location
: ""), type
, ptr
, (unsigned long) sz
);
1914 if (__mf_opts
.collect_stats
)
1915 __mf_count_violation
[(type
< 0) ? 0 :
1916 (type
> __MF_VIOL_WATCH
) ? 0 :
1919 /* Print out a basic warning message. */
1920 if (__mf_opts
.verbose_violations
)
1923 unsigned num_helpful
= 0;
1924 struct timeval now
= { 0, 0 };
1925 #if HAVE_GETTIMEOFDAY
1926 gettimeofday (& now
, NULL
);
1929 violation_number
++;
1932 "mudflap violation %u (%s): time=%lu.%06lu "
1933 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1935 ((type
== __MF_VIOL_READ
) ? "check/read" :
1936 (type
== __MF_VIOL_WRITE
) ? "check/write" :
1937 (type
== __MF_VIOL_REGISTER
) ? "register" :
1938 (type
== __MF_VIOL_UNREGISTER
) ? "unregister" :
1939 (type
== __MF_VIOL_WATCH
) ? "watch" : "unknown"),
1940 now
.tv_sec
, now
.tv_usec
,
1941 (void *) ptr
, (unsigned long)sz
, (void *) pc
,
1942 (location
!= NULL
? " location=`" : ""),
1943 (location
!= NULL
? location
: ""),
1944 (location
!= NULL
? "'" : ""));
1946 if (__mf_opts
.backtrace
> 0)
1951 num
= __mf_backtrace (& symbols
, (void *) pc
, 2);
1952 /* Note: backtrace_symbols calls malloc(). But since we're in
1953 __mf_violation and presumably __mf_check, it'll detect
1954 recursion, and not put the new string into the database. */
1956 for (i
=0; i
<num
; i
++)
1957 fprintf (stderr
, " %s\n", symbols
[i
]);
1959 /* Calling free() here would trigger a violation. */
1960 CALL_REAL(free
, symbols
);
1964 /* Look for nearby objects. For this, we start with s_low/s_high
1965 pointing to the given area, looking for overlapping objects.
1966 If none show up, widen the search area and keep looking. */
1968 if (sz
== 0) sz
= 1;
1970 for (dead_p
= 0; dead_p
<= 1; dead_p
++) /* for dead_p in 0 1 */
1972 enum {max_objs
= 3}; /* magic */
1973 __mf_object_t
*objs
[max_objs
];
1974 unsigned num_objs
= 0;
1975 uintptr_t s_low
, s_high
;
1979 s_low
= (uintptr_t) ptr
;
1980 s_high
= CLAMPSZ (ptr
, sz
);
1982 while (tries
< 16) /* magic */
1985 num_objs
= __mf_find_dead_objects (s_low
, s_high
, objs
, max_objs
);
1987 num_objs
= __mf_find_objects (s_low
, s_high
, objs
, max_objs
);
1989 if (num_objs
) /* good enough */
1994 /* XXX: tune this search strategy. It's too dependent on
1995 sz, which can vary from 1 to very big (when array index
1996 checking) numbers. */
1997 s_low
= CLAMPSUB (s_low
, (sz
* tries
* tries
));
1998 s_high
= CLAMPADD (s_high
, (sz
* tries
* tries
));
2001 for (i
= 0; i
< min (num_objs
, max_objs
); i
++)
2003 __mf_object_t
*obj
= objs
[i
];
2004 uintptr_t low
= (uintptr_t) ptr
;
2005 uintptr_t high
= CLAMPSZ (ptr
, sz
);
2006 unsigned before1
= (low
< obj
->low
) ? obj
->low
- low
: 0;
2007 unsigned after1
= (low
> obj
->high
) ? low
- obj
->high
: 0;
2008 unsigned into1
= (high
>= obj
->low
&& low
<= obj
->high
) ? low
- obj
->low
: 0;
2009 unsigned before2
= (high
< obj
->low
) ? obj
->low
- high
: 0;
2010 unsigned after2
= (high
> obj
->high
) ? high
- obj
->high
: 0;
2011 unsigned into2
= (high
>= obj
->low
&& low
<= obj
->high
) ? high
- obj
->low
: 0;
2013 fprintf (stderr
, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2014 num_helpful
+ i
+ 1,
2015 (before1
? before1
: after1
? after1
: into1
),
2016 (before1
? "before" : after1
? "after" : "into"),
2017 (before2
? before2
: after2
? after2
: into2
),
2018 (before2
? "before" : after2
? "after" : "into"));
2019 __mf_describe_object (obj
);
2021 num_helpful
+= num_objs
;
2024 fprintf (stderr
, "number of nearby objects: %u\n", num_helpful
);
2027 /* How to finally handle this violation? */
2028 switch (__mf_opts
.violation_mode
)
2033 kill (getpid(), SIGSEGV
);
2040 snprintf (buf
, 128, "gdb --pid=%u", (unsigned) getpid ());
2042 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2043 instead, and let the forked child execlp() gdb. That way, this
2044 subject process can be resumed under the supervision of gdb.
2045 This can't happen now, since system() only returns when gdb
2046 dies. In that case, we need to beware of starting a second
2047 concurrent gdb child upon the next violation. (But if the first
2048 gdb dies, then starting a new one is appropriate.) */
2053 /* ------------------------------------------------------------------------ */
2056 unsigned __mf_watch (void *ptr
, size_t sz
)
2060 BEGIN_RECURSION_PROTECT ();
2061 rc
= __mf_watch_or_not (ptr
, sz
, 1);
2062 END_RECURSION_PROTECT ();
2067 unsigned __mf_unwatch (void *ptr
, size_t sz
)
2071 rc
= __mf_watch_or_not (ptr
, sz
, 0);
2078 __mf_watch_or_not (void *ptr
, size_t sz
, char flag
)
2080 uintptr_t ptr_high
= CLAMPSZ (ptr
, sz
);
2081 uintptr_t ptr_low
= (uintptr_t) ptr
;
2084 TRACE ("%s ptr=%p size=%lu\n",
2085 (flag
? "watch" : "unwatch"), ptr
, (unsigned long) sz
);
2087 switch (__mf_opts
.mudflap_mode
)
2097 __mf_object_t
**all_ovr_objs
;
2100 DECLARE (void *, malloc
, size_t c
);
2101 DECLARE (void, free
, void *p
);
2103 obj_count
= __mf_find_objects (ptr_low
, ptr_high
, NULL
, 0);
2104 VERBOSE_TRACE (" %u:", obj_count
);
2106 all_ovr_objs
= CALL_REAL (malloc
, (sizeof (__mf_object_t
*) * obj_count
));
2107 if (all_ovr_objs
== NULL
) abort ();
2108 n
= __mf_find_objects (ptr_low
, ptr_high
, all_ovr_objs
, obj_count
);
2109 assert (n
== obj_count
);
2111 for (n
= 0; n
< obj_count
; n
++)
2113 __mf_object_t
*obj
= all_ovr_objs
[n
];
2115 VERBOSE_TRACE (" [%p]", (void *) obj
);
2116 if (obj
->watching_p
!= flag
)
2118 obj
->watching_p
= flag
;
2121 /* Remove object from cache, to ensure next access
2122 goes through __mf_check(). */
2124 __mf_uncache_object (obj
);
2127 CALL_REAL (free
, all_ovr_objs
);
2137 __mf_sigusr1_handler (int num
)
2139 __mf_sigusr1_received
++;
2142 /* Install or remove SIGUSR1 handler as necessary.
2143 Also, respond to a received pending SIGUSR1. */
2145 __mf_sigusr1_respond ()
2147 static int handler_installed
;
2150 /* Manage handler */
2151 if (__mf_opts
.sigusr1_report
&& ! handler_installed
)
2153 signal (SIGUSR1
, __mf_sigusr1_handler
);
2154 handler_installed
= 1;
2156 else if(! __mf_opts
.sigusr1_report
&& handler_installed
)
2158 signal (SIGUSR1
, SIG_DFL
);
2159 handler_installed
= 0;
2163 /* Manage enqueued signals */
2164 if (__mf_sigusr1_received
> __mf_sigusr1_handled
)
2166 __mf_sigusr1_handled
++;
2167 assert (__mf_state
== reentrant
);
2169 handler_installed
= 0; /* We may need to re-enable signal; this might be a SysV library. */
2174 /* XXX: provide an alternative __assert_fail function that cannot
2175 fail due to libmudflap infinite recursion. */
2179 write_itoa (int fd
, unsigned n
)
2181 enum x
{ bufsize
= sizeof(n
)*4 };
2185 for (i
=0; i
<bufsize
-1; i
++)
2187 unsigned digit
= n
% 10;
2188 buf
[bufsize
-2-i
] = digit
+ '0';
2192 char *m
= & buf
[bufsize
-2-i
];
2193 buf
[bufsize
-1] = '\0';
2194 write (fd
, m
, strlen(m
));
2202 __assert_fail (const char *msg
, const char *file
, unsigned line
, const char *func
)
2204 #define write2(string) write (2, (string), strlen ((string)));
2208 write_itoa (2, (unsigned) pthread_self ());
2211 write2(": assertion failure: `");
2212 write (2, msg
, strlen (msg
));
2214 write (2, func
, strlen (func
));
2216 write (2, file
, strlen (file
));
2218 write_itoa (2, line
);
2229 /* Adapted splay tree code, originally from libiberty. It has been
2230 specialized for libmudflap as requested by RMS. */
2233 mfsplay_tree_free (void *p
)
2235 DECLARE (void, free
, void *p
);
2236 CALL_REAL (free
, p
);
2240 mfsplay_tree_xmalloc (size_t s
)
2242 DECLARE (void *, malloc
, size_t s
);
2243 return CALL_REAL (malloc
, s
);
2247 static void mfsplay_tree_splay (mfsplay_tree
, mfsplay_tree_key
);
2248 static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree
,
2250 mfsplay_tree_node
*,
2251 mfsplay_tree_node
*,
2252 mfsplay_tree_node
*);
2255 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2256 and grandparent, respectively, of NODE. */
2258 static mfsplay_tree_node
2259 mfsplay_tree_splay_helper (mfsplay_tree sp
,
2260 mfsplay_tree_key key
,
2261 mfsplay_tree_node
* node
,
2262 mfsplay_tree_node
* parent
,
2263 mfsplay_tree_node
* grandparent
)
2265 mfsplay_tree_node
*next
;
2266 mfsplay_tree_node n
;
2274 comparison
= ((key
> n
->key
) ? 1 : ((key
< n
->key
) ? -1 : 0));
2276 if (comparison
== 0)
2277 /* We've found the target. */
2279 else if (comparison
< 0)
2280 /* The target is to the left. */
2283 /* The target is to the right. */
2288 /* Check whether our recursion depth is too high. Abort this search,
2289 and signal that a rebalance is required to continue. */
2290 if (sp
->depth
> sp
->max_depth
)
2292 sp
->rebalance_p
= 1;
2296 /* Continue down the tree. */
2298 n
= mfsplay_tree_splay_helper (sp
, key
, next
, node
, parent
);
2301 /* The recursive call will change the place to which NODE
2303 if (*node
!= n
|| sp
->rebalance_p
)
2308 /* NODE is the root. We are done. */
2311 /* First, handle the case where there is no grandparent (i.e.,
2312 *PARENT is the root of the tree.) */
2315 if (n
== (*parent
)->left
)
2329 /* Next handle the cases where both N and *PARENT are left children,
2330 or where both are right children. */
2331 if (n
== (*parent
)->left
&& *parent
== (*grandparent
)->left
)
2333 mfsplay_tree_node p
= *parent
;
2335 (*grandparent
)->left
= p
->right
;
2336 p
->right
= *grandparent
;
2342 else if (n
== (*parent
)->right
&& *parent
== (*grandparent
)->right
)
2344 mfsplay_tree_node p
= *parent
;
2346 (*grandparent
)->right
= p
->left
;
2347 p
->left
= *grandparent
;
2354 /* Finally, deal with the case where N is a left child, but *PARENT
2355 is a right child, or vice versa. */
2356 if (n
== (*parent
)->left
)
2358 (*parent
)->left
= n
->right
;
2360 (*grandparent
)->right
= n
->left
;
2361 n
->left
= *grandparent
;
2367 (*parent
)->right
= n
->left
;
2369 (*grandparent
)->left
= n
->right
;
2370 n
->right
= *grandparent
;
2379 mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n
, void *array_ptr
)
2381 mfsplay_tree_node
**p
= array_ptr
;
2388 static mfsplay_tree_node
2389 mfsplay_tree_rebalance_helper2 (mfsplay_tree_node
* array
, unsigned low
,
2392 unsigned middle
= low
+ (high
- low
) / 2;
2393 mfsplay_tree_node n
= array
[middle
];
2395 /* Note that since we're producing a balanced binary tree, it is not a problem
2396 that this function is recursive. */
2397 if (low
+ 1 <= middle
)
2398 n
->left
= mfsplay_tree_rebalance_helper2 (array
, low
, middle
- 1);
2402 if (middle
+ 1 <= high
)
2403 n
->right
= mfsplay_tree_rebalance_helper2 (array
, middle
+ 1, high
);
2411 /* Rebalance the entire tree. Do this by copying all the node
2412 pointers into an array, then cleverly re-linking them. */
2414 mfsplay_tree_rebalance (mfsplay_tree sp
)
2416 mfsplay_tree_node
*all_nodes
, *all_nodes_1
;
2418 if (sp
->num_keys
<= 2)
2421 all_nodes
= mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node
) * sp
->num_keys
);
2423 /* Traverse all nodes to copy their addresses into this array. */
2424 all_nodes_1
= all_nodes
;
2425 mfsplay_tree_foreach (sp
, mfsplay_tree_rebalance_helper1
,
2426 (void *) &all_nodes_1
);
2428 /* Relink all the nodes. */
2429 sp
->root
= mfsplay_tree_rebalance_helper2 (all_nodes
, 0, sp
->num_keys
- 1);
2431 mfsplay_tree_free (all_nodes
);
2435 /* Splay SP around KEY. */
2437 mfsplay_tree_splay (mfsplay_tree sp
, mfsplay_tree_key key
)
2442 /* If we just splayed the tree with the same key, do nothing. */
2443 if (sp
->last_splayed_key_p
&&
2444 (sp
->last_splayed_key
== key
))
2447 /* Compute a maximum recursion depth for a splay tree with NUM nodes.
2448 The idea is to limit excessive stack usage if we're facing
2449 degenerate access patterns. Unfortunately such patterns can occur
2450 e.g. during static initialization, where many static objects might
2451 be registered in increasing address sequence, or during a case where
2452 large tree-like heap data structures are allocated quickly.
2454 On x86, this corresponds to roughly 200K of stack usage.
2455 XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
2456 sp
->max_depth
= 2500;
2457 sp
->rebalance_p
= sp
->depth
= 0;
2459 mfsplay_tree_splay_helper (sp
, key
, &sp
->root
, NULL
, NULL
);
2460 if (sp
->rebalance_p
)
2462 mfsplay_tree_rebalance (sp
);
2464 sp
->rebalance_p
= sp
->depth
= 0;
2465 mfsplay_tree_splay_helper (sp
, key
, &sp
->root
, NULL
, NULL
);
2467 if (sp
->rebalance_p
)
2472 /* Cache this splay key. */
2473 sp
->last_splayed_key
= key
;
2474 sp
->last_splayed_key_p
= 1;
2479 /* Allocate a new splay tree. */
2483 mfsplay_tree sp
= mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s
));
2485 sp
->last_splayed_key_p
= 0;
2493 /* Insert a new node (associating KEY with DATA) into SP. If a
2494 previous node with the indicated KEY exists, its data is replaced
2495 with the new value. Returns the new node. */
2496 static mfsplay_tree_node
2497 mfsplay_tree_insert (mfsplay_tree sp
, mfsplay_tree_key key
, mfsplay_tree_value value
)
2501 mfsplay_tree_splay (sp
, key
);
2504 comparison
= ((sp
->root
->key
> key
) ? 1 :
2505 ((sp
->root
->key
< key
) ? -1 : 0));
2507 if (sp
->root
&& comparison
== 0)
2509 /* If the root of the tree already has the indicated KEY, just
2510 replace the value with VALUE. */
2511 sp
->root
->value
= value
;
2515 /* Create a new node, and insert it at the root. */
2516 mfsplay_tree_node node
;
2518 node
= mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s
));
2520 node
->value
= value
;
2523 node
->left
= node
->right
= 0;
2524 else if (comparison
< 0)
2526 node
->left
= sp
->root
;
2527 node
->right
= node
->left
->right
;
2528 node
->left
->right
= 0;
2532 node
->right
= sp
->root
;
2533 node
->left
= node
->right
->left
;
2534 node
->right
->left
= 0;
2538 sp
->last_splayed_key_p
= 0;
2544 /* Remove KEY from SP. It is not an error if it did not exist. */
2547 mfsplay_tree_remove (mfsplay_tree sp
, mfsplay_tree_key key
)
2549 mfsplay_tree_splay (sp
, key
);
2550 sp
->last_splayed_key_p
= 0;
2551 if (sp
->root
&& (sp
->root
->key
== key
))
2553 mfsplay_tree_node left
, right
;
2554 left
= sp
->root
->left
;
2555 right
= sp
->root
->right
;
2556 /* Delete the root node itself. */
2557 mfsplay_tree_free (sp
->root
);
2559 /* One of the children is now the root. Doesn't matter much
2560 which, so long as we preserve the properties of the tree. */
2564 /* If there was a right child as well, hang it off the
2565 right-most leaf of the left child. */
2570 left
->right
= right
;
2578 /* Lookup KEY in SP, returning VALUE if present, and NULL
2581 static mfsplay_tree_node
2582 mfsplay_tree_lookup (mfsplay_tree sp
, mfsplay_tree_key key
)
2584 mfsplay_tree_splay (sp
, key
);
2585 if (sp
->root
&& (sp
->root
->key
== key
))
2592 /* Return the immediate predecessor KEY, or NULL if there is no
2593 predecessor. KEY need not be present in the tree. */
2595 static mfsplay_tree_node
2596 mfsplay_tree_predecessor (mfsplay_tree sp
, mfsplay_tree_key key
)
2599 mfsplay_tree_node node
;
2600 /* If the tree is empty, there is certainly no predecessor. */
2603 /* Splay the tree around KEY. That will leave either the KEY
2604 itself, its predecessor, or its successor at the root. */
2605 mfsplay_tree_splay (sp
, key
);
2606 comparison
= ((sp
->root
->key
> key
) ? 1 :
2607 ((sp
->root
->key
< key
) ? -1 : 0));
2609 /* If the predecessor is at the root, just return it. */
2612 /* Otherwise, find the rightmost element of the left subtree. */
2613 node
= sp
->root
->left
;
2620 /* Return the immediate successor KEY, or NULL if there is no
2621 successor. KEY need not be present in the tree. */
2623 static mfsplay_tree_node
2624 mfsplay_tree_successor (mfsplay_tree sp
, mfsplay_tree_key key
)
2627 mfsplay_tree_node node
;
2628 /* If the tree is empty, there is certainly no successor. */
2631 /* Splay the tree around KEY. That will leave either the KEY
2632 itself, its predecessor, or its successor at the root. */
2633 mfsplay_tree_splay (sp
, key
);
2634 comparison
= ((sp
->root
->key
> key
) ? 1 :
2635 ((sp
->root
->key
< key
) ? -1 : 0));
2636 /* If the successor is at the root, just return it. */
2639 /* Otherwise, find the leftmost element of the right subtree. */
2640 node
= sp
->root
->right
;
2647 /* Call FN, passing it the DATA, for every node in SP, following an
2648 in-order traversal. If FN every returns a non-zero value, the
2649 iteration ceases immediately, and the value is returned.
2650 Otherwise, this function returns 0.
2652 This function simulates recursion using dynamically allocated
2653 arrays, since it may be called from mfsplay_tree_rebalance(), which
2654 in turn means that the tree is already uncomfortably deep for stack
2657 mfsplay_tree_foreach (mfsplay_tree st
, mfsplay_tree_foreach_fn fn
, void *data
)
2659 mfsplay_tree_node
*stack1
;
2663 enum s
{ s_left
, s_here
, s_right
, s_up
};
2665 if (st
->root
== NULL
) /* => num_keys == 0 */
2668 stack1
= mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node
) * st
->num_keys
);
2669 stack2
= mfsplay_tree_xmalloc (sizeof (char) * st
->num_keys
);
2672 stack1
[sp
] = st
->root
;
2673 stack2
[sp
] = s_left
;
2677 mfsplay_tree_node n
;
2683 /* Handle each of the four possible states separately. */
2685 /* 1: We're here to traverse the left subtree (if any). */
2688 stack2
[sp
] = s_here
;
2689 if (n
->left
!= NULL
)
2692 stack1
[sp
] = n
->left
;
2693 stack2
[sp
] = s_left
;
2697 /* 2: We're here to traverse this node. */
2698 else if (s
== s_here
)
2700 stack2
[sp
] = s_right
;
2701 val
= (*fn
) (n
, data
);
2705 /* 3: We're here to traverse the right subtree (if any). */
2706 else if (s
== s_right
)
2709 if (n
->right
!= NULL
)
2712 stack1
[sp
] = n
->right
;
2713 stack2
[sp
] = s_left
;
2717 /* 4: We're here after both subtrees (if any) have been traversed. */
2720 /* Pop the stack. */
2721 if (sp
== 0) break; /* Popping off the root note: we're finished! */
2729 mfsplay_tree_free (stack1
);
2730 mfsplay_tree_free (stack2
);