1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later version.
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combined executable.)
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
42 #define __EXTENSIONS__
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
49 #include <sys/types.h>
53 #ifdef HAVE_EXECINFO_H
63 #include <sys/types.h>
68 #include "mf-runtime.h"
70 #include "splay-tree.h"
73 /* ------------------------------------------------------------------------ */
/* GCC attribute shorthands: a CTOR function runs before main(), a DTOR
   function runs at normal program termination (used on __mf_init/__mf_fini).  */
76 #define CTOR __attribute__ ((constructor))
77 #define DTOR __attribute__ ((destructor))
80 /* Codes to describe the context in which a violation occurs. */
81 #define __MF_VIOL_UNKNOWN 0
82 #define __MF_VIOL_READ 1 /* bad read access detected by __mf_check */
83 #define __MF_VIOL_WRITE 2 /* bad write access detected by __mf_check */
84 #define __MF_VIOL_REGISTER 3 /* conflicting object registration */
85 #define __MF_VIOL_UNREGISTER 4 /* mismatched/unknown object unregistration */
86 #define __MF_VIOL_WATCH 5 /* access to an object with watching_p set */
88 /* Protect against recursive calls. */
89 #define BEGIN_RECURSION_PROTECT() do { \
90 if (UNLIKELY (__mf_state == reentrant)) { \
91 write (2, "mf: erroneous reentrancy detected in `", 38); \
92 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
93 write (2, "'\n", 2); \
95 __mf_state = reentrant; \
98 #define END_RECURSION_PROTECT() do { \
99 __mf_state = active; \
104 /* ------------------------------------------------------------------------ */
105 /* Required globals. */
107 #define LOOKUP_CACHE_MASK_DFL 1023
108 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
109 #define LOOKUP_CACHE_SHIFT_DFL 2
/* Cache of recent __mf_check results.  Statically sized for the largest
   permitted mask so that __mf_lc_mask can grow at runtime (see
   __mf_adapt_cache) without reallocation; only entries 0..__mf_lc_mask
   are in use at any time.  */
111 struct __mf_cache __mf_lookup_cache
[LOOKUP_CACHE_SIZE_MAX
];
/* Current index mask; clamped to LOOKUP_CACHE_SIZE_MAX-1 in __mfu_set_options.  */
112 uintptr_t __mf_lc_mask
= LOOKUP_CACHE_MASK_DFL
;
/* Right-shift applied to pointers before masking into the cache
   (presumably via __MF_CACHE_INDEX in mf-impl.h — defined elsewhere).  */
113 unsigned char __mf_lc_shift
= LOOKUP_CACHE_SHIFT_DFL
;
114 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
/* Runtime-tunable options; populated by __mf_set_default_options and
   the MUDFLAP_OPTIONS environment variable.  */
116 struct __mf_options __mf_opts
;
/* Nonzero during early startup, before __mf_init completes.  */
118 int __mf_starting_p
= 1;
/* Global runtime state; set to `reentrant' inside BEGIN_RECURSION_PROTECT
   to detect erroneous re-entry into the runtime.  */
120 enum __mf_state_enum __mf_state
= active
;
122 /* See __mf_state_perthread() in mf-hooks.c. */
127 pthread_mutex_t __mf_biglock
=
128 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
129 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
;
131 PTHREAD_MUTEX_INITIALIZER
;
135 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
136 the libmudflap.la (no threading support) can diagnose whether
137 the application is linked with -lpthread. See __mf_usage() below. */
139 #ifdef _POSIX_THREADS
140 #pragma weak pthread_join
142 #define pthread_join NULL
144 const void *threads_active_p
= (void *) pthread_join
;
148 /* ------------------------------------------------------------------------ */
149 /* stats-related globals. */
151 static unsigned long __mf_count_check
;
152 static unsigned long __mf_lookup_cache_reusecount
[LOOKUP_CACHE_SIZE_MAX
];
153 static unsigned long __mf_count_register
;
154 static unsigned long __mf_total_register_size
[__MF_TYPE_MAX
+1];
155 static unsigned long __mf_count_unregister
;
156 static unsigned long __mf_total_unregister_size
;
157 static unsigned long __mf_count_violation
[__MF_VIOL_WATCH
+1];
158 static unsigned long __mf_sigusr1_received
;
159 static unsigned long __mf_sigusr1_handled
;
160 /* not static */ unsigned long __mf_reentrancy
;
162 /* not static */ unsigned long __mf_lock_contention
;
166 /* ------------------------------------------------------------------------ */
167 /* mode-check-related globals. */
169 typedef struct __mf_object
171 uintptr_t low
, high
; /* __mf_register parameters */
173 char type
; /* __MF_TYPE_something */
174 char watching_p
; /* Trigger a VIOL_WATCH on access? */
175 unsigned read_count
; /* Number of times __mf_check/read was called on this object. */
176 unsigned write_count
; /* Likewise for __mf_check/write. */
177 unsigned liveness
; /* A measure of recent checking activity. */
178 unsigned description_epoch
; /* Last epoch __mf_describe_object printed this. */
181 struct timeval alloc_time
;
182 char **alloc_backtrace
;
183 size_t alloc_backtrace_size
;
185 pthread_t alloc_thread
;
189 uintptr_t dealloc_pc
;
190 struct timeval dealloc_time
;
191 char **dealloc_backtrace
;
192 size_t dealloc_backtrace_size
;
194 pthread_t dealloc_thread
;
198 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
199 /* Actually stored as static vars within lookup function below. */
201 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
202 static unsigned __mf_object_dead_head
[__MF_TYPE_MAX_CEM
+1]; /* next empty spot */
203 static __mf_object_t
*__mf_object_cemetary
[__MF_TYPE_MAX_CEM
+1][__MF_PERSIST_MAX
];
206 /* ------------------------------------------------------------------------ */
207 /* Forward function declarations */
209 void __mf_init () CTOR
;
210 static void __mf_sigusr1_respond ();
211 static unsigned __mf_find_objects (uintptr_t ptr_low
, uintptr_t ptr_high
,
212 __mf_object_t
**objs
, unsigned max_objs
);
213 static unsigned __mf_find_objects2 (uintptr_t ptr_low
, uintptr_t ptr_high
,
214 __mf_object_t
**objs
, unsigned max_objs
, int type
);
215 static unsigned __mf_find_dead_objects (uintptr_t ptr_low
, uintptr_t ptr_high
,
216 __mf_object_t
**objs
, unsigned max_objs
);
217 static void __mf_adapt_cache ();
218 static void __mf_describe_object (__mf_object_t
*obj
);
219 static unsigned __mf_watch_or_not (void *ptr
, size_t sz
, char flag
);
220 static splay_tree
__mf_object_tree (int type
);
221 static void __mf_link_object (__mf_object_t
*node
);
222 static void __mf_unlink_object (__mf_object_t
*node
);
225 /* ------------------------------------------------------------------------ */
226 /* Configuration engine */
229 __mf_set_default_options ()
231 memset (& __mf_opts
, 0, sizeof (__mf_opts
));
233 __mf_opts
.adapt_cache
= 1000003;
234 __mf_opts
.abbreviate
= 1;
235 __mf_opts
.verbose_violations
= 1;
236 __mf_opts
.free_queue_length
= 4;
237 __mf_opts
.persistent_count
= 100;
238 __mf_opts
.crumple_zone
= 32;
239 __mf_opts
.backtrace
= 4;
240 __mf_opts
.timestamps
= 1;
241 __mf_opts
.mudflap_mode
= mode_check
;
242 __mf_opts
.violation_mode
= viol_nop
;
243 __mf_opts
.heur_std_data
= 1;
245 __mf_opts
.thread_stack
= 0;
264 "mudflaps do nothing",
265 set_option
, (int)mode_nop
, (int *)&__mf_opts
.mudflap_mode
},
267 "mudflaps populate object tree",
268 set_option
, (int)mode_populate
, (int *)&__mf_opts
.mudflap_mode
},
270 "mudflaps check for memory violations",
271 set_option
, (int)mode_check
, (int *)&__mf_opts
.mudflap_mode
},
273 "mudflaps always cause violations (diagnostic)",
274 set_option
, (int)mode_violate
, (int *)&__mf_opts
.mudflap_mode
},
277 "violations do not change program execution",
278 set_option
, (int)viol_nop
, (int *)&__mf_opts
.violation_mode
},
280 "violations cause a call to abort()",
281 set_option
, (int)viol_abort
, (int *)&__mf_opts
.violation_mode
},
283 "violations are promoted to SIGSEGV signals",
284 set_option
, (int)viol_segv
, (int *)&__mf_opts
.violation_mode
},
286 "violations fork a gdb process attached to current program",
287 set_option
, (int)viol_gdb
, (int *)&__mf_opts
.violation_mode
},
289 "trace calls to mudflap runtime library",
290 set_option
, 1, &__mf_opts
.trace_mf_calls
},
292 "trace internal events within mudflap runtime library",
293 set_option
, 1, &__mf_opts
.verbose_trace
},
295 "collect statistics on mudflap's operation",
296 set_option
, 1, &__mf_opts
.collect_stats
},
299 "print report upon SIGUSR1",
300 set_option
, 1, &__mf_opts
.sigusr1_report
},
302 {"internal-checking",
303 "perform more expensive internal checking",
304 set_option
, 1, &__mf_opts
.internal_checking
},
306 "print any memory leaks at program shutdown",
307 set_option
, 1, &__mf_opts
.print_leaks
},
308 {"check-initialization",
309 "detect uninitialized object reads",
310 set_option
, 1, &__mf_opts
.check_initialization
},
311 {"verbose-violations",
312 "print verbose messages when memory violations occur",
313 set_option
, 1, &__mf_opts
.verbose_violations
},
315 "abbreviate repetitive listings",
316 set_option
, 1, &__mf_opts
.abbreviate
},
318 "track object lifetime timestamps",
319 set_option
, 1, &__mf_opts
.timestamps
},
321 "ignore read accesses - assume okay",
322 set_option
, 1, &__mf_opts
.ignore_reads
},
324 "wipe stack objects at unwind",
325 set_option
, 1, &__mf_opts
.wipe_stack
},
327 "wipe heap objects at free",
328 set_option
, 1, &__mf_opts
.wipe_heap
},
330 "support /proc/self/map heuristics",
331 set_option
, 1, &__mf_opts
.heur_proc_map
},
333 "enable a simple upper stack bound heuristic",
334 set_option
, 1, &__mf_opts
.heur_stack_bound
},
336 "support _start.._end heuristics",
337 set_option
, 1, &__mf_opts
.heur_start_end
},
339 "register standard library data (argv, errno, stdin, ...)",
340 set_option
, 1, &__mf_opts
.heur_std_data
},
341 {"free-queue-length",
342 "queue N deferred free() calls before performing them",
343 read_integer_option
, 0, &__mf_opts
.free_queue_length
},
345 "keep a history of N unregistered regions",
346 read_integer_option
, 0, &__mf_opts
.persistent_count
},
348 "surround allocations with crumple zones of N bytes",
349 read_integer_option
, 0, &__mf_opts
.crumple_zone
},
350 /* XXX: not type-safe.
352 "set lookup cache size mask to N (2**M - 1)",
353 read_integer_option, 0, (int *)(&__mf_lc_mask)},
355 "set lookup cache pointer shift",
356 read_integer_option, 0, (int *)(&__mf_lc_shift)},
359 "adapt mask/shift parameters after N cache misses",
360 read_integer_option
, 1, &__mf_opts
.adapt_cache
},
362 "keep an N-level stack trace of each call context",
363 read_integer_option
, 0, &__mf_opts
.backtrace
},
366 "override thread stacks allocation: N kB",
367 read_integer_option
, 0, &__mf_opts
.thread_stack
},
369 {0, 0, set_option
, 0, NULL
}
378 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
379 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
381 "The mudflap code can be controlled by an environment variable:\n"
383 "$ export MUDFLAP_OPTIONS='<options>'\n"
384 "$ <mudflapped_program>\n"
386 "where <options> is a space-separated list of \n"
387 "any of the following options. Use `-no-OPTION' to disable options.\n"
390 (threads_active_p
? "multi-threaded " : "single-threaded "),
400 /* XXX: The multi-threaded thread-unaware combination is bad. */
402 for (opt
= options
; opt
->name
; opt
++)
404 int default_p
= (opt
->value
== * opt
->target
);
410 fprintf (stderr
, "-%-23.23s %s", opt
->name
, opt
->description
);
412 fprintf (stderr
, " [active]\n");
414 fprintf (stderr
, "\n");
416 case read_integer_option
:
417 strncpy (buf
, opt
->name
, 128);
418 strncpy (buf
+ strlen (opt
->name
), "=N", 2);
419 fprintf (stderr
, "-%-23.23s %s", buf
, opt
->description
);
420 fprintf (stderr
, " [%d]\n", * opt
->target
);
426 fprintf (stderr
, "\n");
431 __mf_set_options (const char *optstr
)
435 BEGIN_RECURSION_PROTECT ();
436 rc
= __mfu_set_options (optstr
);
437 /* XXX: It's not really that easy. A change to a bunch of parameters
438 can require updating auxiliary state or risk crashing:
439 free_queue_length, crumple_zone ... */
440 END_RECURSION_PROTECT ();
447 __mfu_set_options (const char *optstr
)
449 struct option
*opts
= 0;
453 const char *saved_optstr
= optstr
;
455 /* XXX: bounds-check for optstr! */
472 if (*optstr
== '?' ||
473 strncmp (optstr
, "help", 4) == 0)
475 /* Caller will print help and exit. */
479 if (strncmp (optstr
, "no-", 3) == 0)
482 optstr
= & optstr
[3];
485 for (opts
= options
; opts
->name
; opts
++)
487 if (strncmp (optstr
, opts
->name
, strlen (opts
->name
)) == 0)
489 optstr
+= strlen (opts
->name
);
490 assert (opts
->target
);
497 *(opts
->target
) = opts
->value
;
499 case read_integer_option
:
500 if (! negate
&& (*optstr
== '=' && *(optstr
+1)))
503 tmp
= strtol (optstr
, &nxt
, 10);
504 if ((optstr
!= nxt
) && (tmp
!= LONG_MAX
))
507 *(opts
->target
) = (int)tmp
;
521 "warning: unrecognized string '%s' in mudflap options\n",
523 optstr
+= strlen (optstr
);
529 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
530 __mf_lc_mask
&= (LOOKUP_CACHE_SIZE_MAX
- 1);
531 __mf_opts
.free_queue_length
&= (__MF_FREEQ_MAX
- 1);
533 /* Clear the lookup cache, in case the parameters got changed. */
535 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
537 __mf_lookup_cache
[0].low
= MAXPTR
;
539 TRACE ("set options from `%s'\n", saved_optstr
);
541 /* Call this unconditionally, in case -sigusr1-report was toggled. */
542 __mf_sigusr1_respond ();
551 __mf_resolve_single_dynamic (struct __mf_dynamic_entry
*e
)
556 if (e
->pointer
) return;
559 if (e
->version
!= NULL
&& e
->version
[0] != '\0') /* non-null/empty */
560 e
->pointer
= dlvsym (RTLD_NEXT
, e
->name
, e
->version
);
563 e
->pointer
= dlsym (RTLD_NEXT
, e
->name
);
569 fprintf (stderr
, "mf: error in dlsym(\"%s\"): %s\n",
575 fprintf (stderr
, "mf: dlsym(\"%s\") = NULL\n", e
->name
);
582 __mf_resolve_dynamics ()
585 for (i
= 0; i
< dyn_INITRESOLVE
; i
++)
586 __mf_resolve_single_dynamic (& __mf_dynamic
[i
]);
590 /* NB: order must match enums in mf-impl.h */
591 struct __mf_dynamic_entry __mf_dynamic
[] =
593 {NULL
, "calloc", NULL
},
594 {NULL
, "free", NULL
},
595 {NULL
, "malloc", NULL
},
596 {NULL
, "mmap", NULL
},
597 {NULL
, "munmap", NULL
},
598 {NULL
, "realloc", NULL
},
599 {NULL
, "DUMMY", NULL
}, /* dyn_INITRESOLVE */
601 {NULL
, "pthread_create", PTHREAD_CREATE_VERSION
},
602 {NULL
, "pthread_join", NULL
},
603 {NULL
, "pthread_exit", NULL
}
611 /* ------------------------------------------------------------------------ */
613 /* Lookup & manage automatic initialization of the five or so splay trees. */
615 __mf_object_tree (int type
)
617 static splay_tree trees
[__MF_TYPE_MAX
+1];
618 assert (type
>= 0 && type
<= __MF_TYPE_MAX
);
619 if (UNLIKELY (trees
[type
] == NULL
))
620 trees
[type
] = splay_tree_new ();
630 /* Return if initialization has already been done. */
631 if (LIKELY (__mf_starting_p
== 0))
634 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
636 __mf_resolve_dynamics ();
640 __mf_set_default_options ();
642 ov
= getenv ("MUDFLAP_OPTIONS");
645 int rc
= __mfu_set_options (ov
);
653 /* Initialize to a non-zero description epoch. */
654 __mf_describe_object (NULL
);
656 #define REG_RESERVED(obj) \
657 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
659 REG_RESERVED (__mf_lookup_cache
);
660 REG_RESERVED (__mf_lc_mask
);
661 REG_RESERVED (__mf_lc_shift
);
662 /* XXX: others of our statics? */
664 /* Prevent access to *NULL. */
665 __mf_register (MINPTR
, 1, __MF_TYPE_NOACCESS
, "NULL");
666 __mf_lookup_cache
[0].low
= (uintptr_t) -1;
672 __wrap_main (int argc
, char* argv
[])
674 extern char **environ
;
676 static int been_here
= 0;
678 if (__mf_opts
.heur_std_data
&& ! been_here
)
683 __mf_register (argv
, sizeof(char *)*(argc
+1), __MF_TYPE_STATIC
, "argv[]");
684 for (i
=0; i
<argc
; i
++)
686 unsigned j
= strlen (argv
[i
]);
687 __mf_register (argv
[i
], j
+1, __MF_TYPE_STATIC
, "argv element");
692 char *e
= environ
[i
];
694 if (e
== NULL
) break;
695 j
= strlen (environ
[i
]);
696 __mf_register (environ
[i
], j
+1, __MF_TYPE_STATIC
, "environ element");
698 __mf_register (environ
, sizeof(char *)*(i
+1), __MF_TYPE_STATIC
, "environ[]");
700 __mf_register (& errno
, sizeof (errno
), __MF_TYPE_STATIC
, "errno area");
702 __mf_register (stdin
, sizeof (*stdin
), __MF_TYPE_STATIC
, "stdin");
703 __mf_register (stdout
, sizeof (*stdout
), __MF_TYPE_STATIC
, "stdout");
704 __mf_register (stderr
, sizeof (*stderr
), __MF_TYPE_STATIC
, "stderr");
706 /* Make some effort to register ctype.h static arrays. */
707 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
708 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
709 with in mf-hooks2.c. */
713 return main (argc
, argv
, environ
);
715 return __real_main (argc
, argv
, environ
);
721 extern void __mf_fini () DTOR
;
724 TRACE ("__mf_fini\n");
730 /* ------------------------------------------------------------------------ */
733 void __mf_check (void *ptr
, size_t sz
, int type
, const char *location
)
736 BEGIN_RECURSION_PROTECT ();
737 __mfu_check (ptr
, sz
, type
, location
);
738 END_RECURSION_PROTECT ();
743 void __mfu_check (void *ptr
, size_t sz
, int type
, const char *location
)
745 unsigned entry_idx
= __MF_CACHE_INDEX (ptr
);
746 struct __mf_cache
*entry
= & __mf_lookup_cache
[entry_idx
];
747 int judgement
= 0; /* 0=undecided; <0=violation; >0=okay */
748 uintptr_t ptr_low
= (uintptr_t) ptr
;
749 uintptr_t ptr_high
= CLAMPSZ (ptr
, sz
);
750 struct __mf_cache old_entry
= *entry
;
752 if (UNLIKELY (__mf_opts
.sigusr1_report
))
753 __mf_sigusr1_respond ();
755 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
756 ptr
, entry_idx
, (unsigned long)sz
,
757 (type
== 0 ? "read" : "write"), location
);
759 switch (__mf_opts
.mudflap_mode
)
763 entry
->high
= MAXPTR
;
768 entry
->low
= ptr_low
;
769 entry
->high
= ptr_high
;
775 unsigned heuristics
= 0;
777 /* Advance aging/adaptation counters. */
778 static unsigned adapt_count
;
780 if (UNLIKELY (__mf_opts
.adapt_cache
> 0 &&
781 adapt_count
> __mf_opts
.adapt_cache
))
787 /* Looping only occurs if heuristics were triggered. */
788 while (judgement
== 0)
790 DECLARE (void, free
, void *p
);
791 __mf_object_t
* ovr_obj
[1];
793 __mf_object_t
** all_ovr_obj
= NULL
;
794 __mf_object_t
** dealloc_me
= NULL
;
797 /* Find all overlapping objects. Be optimistic that there is just one. */
798 obj_count
= __mf_find_objects (ptr_low
, ptr_high
, ovr_obj
, 1);
799 if (UNLIKELY (obj_count
> 1))
801 /* Allocate a real buffer and do the search again. */
802 DECLARE (void *, malloc
, size_t c
);
804 all_ovr_obj
= CALL_REAL (malloc
, (sizeof (__mf_object_t
*) *
806 if (all_ovr_obj
== NULL
) abort ();
807 n
= __mf_find_objects (ptr_low
, ptr_high
, all_ovr_obj
, obj_count
);
808 assert (n
== obj_count
);
809 dealloc_me
= all_ovr_obj
;
813 all_ovr_obj
= ovr_obj
;
817 /* Update object statistics. */
818 for (i
= 0; i
< obj_count
; i
++)
820 __mf_object_t
*obj
= all_ovr_obj
[i
];
821 assert (obj
!= NULL
);
822 if (type
== __MF_CHECK_READ
)
829 /* Iterate over the various objects. There are a number of special cases. */
830 for (i
= 0; i
< obj_count
; i
++)
832 __mf_object_t
*obj
= all_ovr_obj
[i
];
834 /* Any __MF_TYPE_NOACCESS hit is bad. */
835 if (UNLIKELY (obj
->type
== __MF_TYPE_NOACCESS
))
838 /* Any object with a watch flag is bad. */
839 if (UNLIKELY (obj
->watching_p
))
840 judgement
= -2; /* trigger VIOL_WATCH */
842 /* A read from an uninitialized object is bad. */
843 if (UNLIKELY (__mf_opts
.check_initialization
845 && type
== __MF_CHECK_READ
847 && obj
->write_count
== 0
848 /* uninitialized (heap) */
849 && obj
->type
== __MF_TYPE_HEAP
))
853 /* We now know that the access spans one or more valid objects. */
854 if (LIKELY (judgement
>= 0))
855 for (i
= 0; i
< obj_count
; i
++)
857 __mf_object_t
*obj
= all_ovr_obj
[i
];
859 /* Is this access entirely contained within this object? */
860 if (LIKELY (ptr_low
>= obj
->low
&& ptr_high
<= obj
->high
))
863 entry
->low
= obj
->low
;
864 entry
->high
= obj
->high
;
868 /* XXX: Access runs off left or right side of this
869 object. That could be okay, if there are
870 other objects that fill in all the holes. */
873 if (dealloc_me
!= NULL
)
874 CALL_REAL (free
, dealloc_me
);
876 /* If the judgment is still unknown at this stage, loop
877 around at most one more time. */
880 if (heuristics
++ < 2) /* XXX parametrize this number? */
881 judgement
= __mf_heuristic_check (ptr_low
, ptr_high
);
895 if (__mf_opts
.collect_stats
)
899 if (LIKELY (old_entry
.low
!= entry
->low
|| old_entry
.high
!= entry
->high
))
900 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
901 __mf_lookup_cache_reusecount
[entry_idx
] ++;
904 if (UNLIKELY (judgement
< 0))
905 __mf_violation (ptr
, sz
,
906 (uintptr_t) __builtin_return_address (0), location
,
908 (type
== __MF_CHECK_READ
? __MF_VIOL_READ
: __MF_VIOL_WRITE
) :
913 static __mf_object_t
*
914 __mf_insert_new_object (uintptr_t low
, uintptr_t high
, int type
,
915 const char *name
, uintptr_t pc
)
917 DECLARE (void *, calloc
, size_t c
, size_t n
);
919 __mf_object_t
*new_obj
;
920 new_obj
= CALL_REAL (calloc
, 1, sizeof(__mf_object_t
));
922 new_obj
->high
= high
;
923 new_obj
->type
= type
;
924 new_obj
->name
= name
;
925 new_obj
->alloc_pc
= pc
;
926 #if HAVE_GETTIMEOFDAY
927 if (__mf_opts
.timestamps
)
928 gettimeofday (& new_obj
->alloc_time
, NULL
);
931 new_obj
->alloc_thread
= pthread_self ();
934 if (__mf_opts
.backtrace
> 0 && (type
== __MF_TYPE_HEAP
|| type
== __MF_TYPE_HEAP_I
))
935 new_obj
->alloc_backtrace_size
=
936 __mf_backtrace (& new_obj
->alloc_backtrace
,
939 __mf_link_object (new_obj
);
945 __mf_uncache_object (__mf_object_t
*old_obj
)
947 /* Remove any low/high pointers for this object from the lookup cache. */
949 /* Can it possibly exist in the cache? */
950 if (LIKELY (old_obj
->read_count
+ old_obj
->write_count
))
952 uintptr_t low
= old_obj
->low
;
953 uintptr_t high
= old_obj
->high
;
954 unsigned idx_low
= __MF_CACHE_INDEX (low
);
955 unsigned idx_high
= __MF_CACHE_INDEX (high
);
957 for (i
= idx_low
; i
<= idx_high
; i
++)
959 struct __mf_cache
*entry
= & __mf_lookup_cache
[i
];
960 /* NB: the "||" in the following test permits this code to
961 tolerate the situation introduced by __mf_check over
962 contiguous objects, where a cache entry spans several
964 if (entry
->low
== low
|| entry
->high
== high
)
967 entry
->high
= MINPTR
;
975 __mf_register (void *ptr
, size_t sz
, int type
, const char *name
)
978 BEGIN_RECURSION_PROTECT ();
979 __mfu_register (ptr
, sz
, type
, name
);
980 END_RECURSION_PROTECT ();
986 __mfu_register (void *ptr
, size_t sz
, int type
, const char *name
)
988 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
989 ptr
, (unsigned long) sz
, type
, name
? name
: "");
991 if (__mf_opts
.collect_stats
)
993 __mf_count_register
++;
994 __mf_total_register_size
[(type
< 0) ? 0 :
995 (type
> __MF_TYPE_MAX
) ? 0 :
999 if (UNLIKELY (__mf_opts
.sigusr1_report
))
1000 __mf_sigusr1_respond ();
1002 switch (__mf_opts
.mudflap_mode
)
1008 __mf_violation (ptr
, sz
, (uintptr_t) __builtin_return_address (0), NULL
,
1009 __MF_VIOL_REGISTER
);
1013 /* Clear the cache. */
1014 /* XXX: why the entire cache? */
1016 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
1018 __mf_lookup_cache
[0].low
= MAXPTR
;
1023 __mf_object_t
*ovr_objs
[1];
1024 unsigned num_overlapping_objs
;
1025 uintptr_t low
= (uintptr_t) ptr
;
1026 uintptr_t high
= CLAMPSZ (ptr
, sz
);
1027 uintptr_t pc
= (uintptr_t) __builtin_return_address (0);
1029 /* Treat unknown size indication as 1. */
1030 if (UNLIKELY (sz
== 0)) sz
= 1;
1032 /* Look for objects only of the same type. This will e.g. permit a registration
1033 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1034 __mf_check time however harmful overlaps will be detected. */
1035 num_overlapping_objs
= __mf_find_objects2 (low
, high
, ovr_objs
, 1, type
);
1037 /* Handle overlaps. */
1038 if (UNLIKELY (num_overlapping_objs
> 0))
1040 __mf_object_t
*ovr_obj
= ovr_objs
[0];
1042 /* Accept certain specific duplication pairs. */
1043 if (((type
== __MF_TYPE_STATIC
) || (type
== __MF_TYPE_GUESS
))
1044 && ovr_obj
->low
== low
1045 && ovr_obj
->high
== high
1046 && ovr_obj
->type
== type
)
1048 /* Duplicate registration for static objects may come
1049 from distinct compilation units. */
1050 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1051 (void *) low
, (void *) high
,
1052 (ovr_obj
->name
? ovr_obj
->name
: ""));
1056 /* Alas, a genuine violation. */
1059 /* Two or more *real* mappings here. */
1060 __mf_violation ((void *) ptr
, sz
,
1061 (uintptr_t) __builtin_return_address (0), NULL
,
1062 __MF_VIOL_REGISTER
);
1065 else /* No overlapping objects: AOK. */
1066 __mf_insert_new_object (low
, high
, type
, name
, pc
);
1068 /* We could conceivably call __mf_check() here to prime the cache,
1069 but then the read_count/write_count field is not reliable. */
1072 } /* end switch (__mf_opts.mudflap_mode) */
1077 __mf_unregister (void *ptr
, size_t sz
, int type
)
1080 BEGIN_RECURSION_PROTECT ();
1081 __mfu_unregister (ptr
, sz
, type
);
1082 END_RECURSION_PROTECT ();
1088 __mfu_unregister (void *ptr
, size_t sz
, int type
)
1090 DECLARE (void, free
, void *ptr
);
1092 if (UNLIKELY (__mf_opts
.sigusr1_report
))
1093 __mf_sigusr1_respond ();
1095 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr
, (unsigned long) sz
, type
);
1097 switch (__mf_opts
.mudflap_mode
)
1103 __mf_violation (ptr
, sz
,
1104 (uintptr_t) __builtin_return_address (0), NULL
,
1105 __MF_VIOL_UNREGISTER
);
1109 /* Clear the cache. */
1111 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
1113 __mf_lookup_cache
[0].low
= MAXPTR
;
1118 __mf_object_t
*old_obj
= NULL
;
1119 __mf_object_t
*del_obj
= NULL
; /* Object to actually delete. */
1120 __mf_object_t
*objs
[1] = {NULL
};
1121 unsigned num_overlapping_objs
;
1123 num_overlapping_objs
= __mf_find_objects2 ((uintptr_t) ptr
,
1124 CLAMPSZ (ptr
, sz
), objs
, 1, type
);
1126 /* Special case for HEAP_I - see free & realloc hook. They don't
1127 know whether the input region was HEAP or HEAP_I before
1128 unmapping it. Here we give HEAP a try in case HEAP_I
1130 if ((type
== __MF_TYPE_HEAP_I
) && (num_overlapping_objs
== 0))
1132 num_overlapping_objs
= __mf_find_objects2 ((uintptr_t) ptr
,
1133 CLAMPSZ (ptr
, sz
), objs
, 1, __MF_TYPE_HEAP
);
1137 if (UNLIKELY ((num_overlapping_objs
!= 1) /* more than one overlap */
1138 || ((sz
== 0) ? 0 : (sz
!= (old_obj
->high
- old_obj
->low
+ 1))) /* size mismatch */
1139 || ((uintptr_t) ptr
!= old_obj
->low
))) /* base mismatch */
1141 __mf_violation (ptr
, sz
,
1142 (uintptr_t) __builtin_return_address (0), NULL
,
1143 __MF_VIOL_UNREGISTER
);
1147 __mf_unlink_object (old_obj
);
1148 __mf_uncache_object (old_obj
);
1150 /* Wipe buffer contents if desired. */
1151 if ((__mf_opts
.wipe_stack
&& old_obj
->type
== __MF_TYPE_STACK
)
1152 || (__mf_opts
.wipe_heap
&& (old_obj
->type
== __MF_TYPE_HEAP
1153 || old_obj
->type
== __MF_TYPE_HEAP_I
)))
1155 memset ((void *) old_obj
->low
,
1157 (size_t) (old_obj
->high
- old_obj
->low
+ 1));
1160 /* Manage the object cemetary. */
1161 if (__mf_opts
.persistent_count
> 0 &&
1162 old_obj
->type
>= 0 &&
1163 old_obj
->type
<= __MF_TYPE_MAX_CEM
)
1165 old_obj
->deallocated_p
= 1;
1166 old_obj
->dealloc_pc
= (uintptr_t) __builtin_return_address (0);
1167 #if HAVE_GETTIMEOFDAY
1168 if (__mf_opts
.timestamps
)
1169 gettimeofday (& old_obj
->dealloc_time
, NULL
);
1172 old_obj
->dealloc_thread
= pthread_self ();
1175 if (__mf_opts
.backtrace
> 0 && old_obj
->type
== __MF_TYPE_HEAP
)
1176 old_obj
->dealloc_backtrace_size
=
1177 __mf_backtrace (& old_obj
->dealloc_backtrace
,
1180 /* Encourage this object to be displayed again in current epoch. */
1181 old_obj
->description_epoch
--;
1183 /* Put this object into the cemetary. This may require this plot to
1184 be recycled, and the previous resident to be designated del_obj. */
1186 unsigned row
= old_obj
->type
;
1187 unsigned plot
= __mf_object_dead_head
[row
];
1189 del_obj
= __mf_object_cemetary
[row
][plot
];
1190 __mf_object_cemetary
[row
][plot
] = old_obj
;
1192 if (plot
== __mf_opts
.persistent_count
) plot
= 0;
1193 __mf_object_dead_head
[row
] = plot
;
1199 if (__mf_opts
.print_leaks
)
1201 if ((old_obj
->read_count
+ old_obj
->write_count
) == 0 &&
1202 (old_obj
->type
== __MF_TYPE_HEAP
1203 || old_obj
->type
== __MF_TYPE_HEAP_I
))
1207 "mudflap warning: unaccessed registered object:\n");
1208 __mf_describe_object (old_obj
);
1212 if (del_obj
!= NULL
) /* May or may not equal old_obj. */
1214 if (__mf_opts
.backtrace
> 0)
1216 CALL_REAL(free
, del_obj
->alloc_backtrace
);
1217 if (__mf_opts
.persistent_count
> 0)
1219 CALL_REAL(free
, del_obj
->dealloc_backtrace
);
1222 CALL_REAL(free
, del_obj
);
1227 } /* end switch (__mf_opts.mudflap_mode) */
1230 if (__mf_opts
.collect_stats
)
1232 __mf_count_unregister
++;
1233 __mf_total_unregister_size
+= sz
;
1242 unsigned long total_size
;
1243 unsigned live_obj_count
;
1244 double total_weight
;
1245 double weighted_size
;
1246 unsigned long weighted_address_bits
[sizeof (uintptr_t) * 8][2];
1252 __mf_adapt_cache_fn (splay_tree_node n
, void *param
)
1254 __mf_object_t
*obj
= (__mf_object_t
*) n
->value
;
1255 struct tree_stats
*s
= (struct tree_stats
*) param
;
1257 assert (obj
!= NULL
&& s
!= NULL
);
1259 /* Exclude never-accessed objects. */
1260 if (obj
->read_count
+ obj
->write_count
)
1263 s
->total_size
+= (obj
->high
- obj
->low
+ 1);
1270 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1271 (void *) obj->low, obj->liveness, obj->name); */
1273 s
->live_obj_count
++;
1274 s
->total_weight
+= (double) obj
->liveness
;
1276 (double) (obj
->high
- obj
->low
+ 1) *
1277 (double) obj
->liveness
;
1280 for (i
=0; i
<sizeof(uintptr_t) * 8; i
++)
1282 unsigned bit
= addr
& 1;
1283 s
->weighted_address_bits
[i
][bit
] += obj
->liveness
;
1287 /* Age the liveness value. */
1288 obj
->liveness
>>= 1;
1299 struct tree_stats s
;
1300 uintptr_t new_mask
= 0;
1301 unsigned char new_shift
;
1302 float cache_utilization
;
1304 static float smoothed_new_shift
= -1.0;
1307 memset (&s
, 0, sizeof (s
));
1309 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP
), __mf_adapt_cache_fn
, (void *) & s
);
1310 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I
), __mf_adapt_cache_fn
, (void *) & s
);
1311 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK
), __mf_adapt_cache_fn
, (void *) & s
);
1312 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC
), __mf_adapt_cache_fn
, (void *) & s
);
1313 splay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS
), __mf_adapt_cache_fn
, (void *) & s
);
1315 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1316 empty tree. Just leave the cache alone in such cases, rather
1317 than risk dying by division-by-zero. */
1318 if (! (s
.obj_count
> 0) && (s
.live_obj_count
> 0) && (s
.total_weight
> 0.0))
1321 /* Guess a good value for the shift parameter by finding an address bit that is a
1322 good discriminant of lively objects. */
1324 for (i
=0; i
<sizeof (uintptr_t)*8; i
++)
1326 float value
= (float) s
.weighted_address_bits
[i
][0] * (float) s
.weighted_address_bits
[i
][1];
1327 if (max_value
< value
) max_value
= value
;
1329 for (i
=0; i
<sizeof (uintptr_t)*8; i
++)
1331 float shoulder_factor
= 0.7; /* Include slightly less popular bits too. */
1332 float value
= (float) s
.weighted_address_bits
[i
][0] * (float) s
.weighted_address_bits
[i
][1];
1333 if (value
>= max_value
* shoulder_factor
)
1336 if (smoothed_new_shift
< 0) smoothed_new_shift
= __mf_lc_shift
;
1337 /* Converge toward this slowly to reduce flapping. */
1338 smoothed_new_shift
= 0.9*smoothed_new_shift
+ 0.1*i
;
1339 new_shift
= (unsigned) (smoothed_new_shift
+ 0.5);
1340 assert (new_shift
< sizeof (uintptr_t)*8);
1342 /* Count number of used buckets. */
1343 cache_utilization
= 0.0;
1344 for (i
= 0; i
< (1 + __mf_lc_mask
); i
++)
1345 if (__mf_lookup_cache
[i
].low
!= 0 || __mf_lookup_cache
[i
].high
!= 0)
1346 cache_utilization
+= 1.0;
1347 cache_utilization
/= (1 + __mf_lc_mask
);
1349 new_mask
|= 0x3ff; /* XXX: force a large cache. */
1350 new_mask
&= (LOOKUP_CACHE_SIZE_MAX
- 1);
1352 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1353 "util=%u%% m=%p s=%u\n",
1354 s
.obj_count
, s
.live_obj_count
, s
.total_size
, s
.total_weight
, s
.weighted_size
,
1355 (unsigned)(cache_utilization
*100.0), (void *) new_mask
, new_shift
);
1357 /* We should reinitialize cache if its parameters have changed. */
1358 if (new_mask
!= __mf_lc_mask
||
1359 new_shift
!= __mf_lc_shift
)
1361 __mf_lc_mask
= new_mask
;
1362 __mf_lc_shift
= new_shift
;
1364 memset (__mf_lookup_cache
, 0, sizeof(__mf_lookup_cache
));
1366 __mf_lookup_cache
[0].low
= MAXPTR
;
1372 /* __mf_find_object[s] */
1374 /* Find overlapping live objecs between [low,high]. Return up to
1375 max_objs of their pointers in objs[]. Return total count of
1376 overlaps (may exceed max_objs). */
1379 __mf_find_objects2 (uintptr_t ptr_low
, uintptr_t ptr_high
,
1380 __mf_object_t
**objs
, unsigned max_objs
, int type
)
1383 splay_tree t
= __mf_object_tree (type
);
1384 splay_tree_key k
= (splay_tree_key
) ptr_low
;
1387 splay_tree_node n
= splay_tree_lookup (t
, k
);
1388 /* An exact match for base address implies a hit. */
1391 if (count
< max_objs
)
1392 objs
[count
] = (__mf_object_t
*) n
->value
;
1396 /* Iterate left then right near this key value to find all overlapping objects. */
1397 for (direction
= 0; direction
< 2; direction
++)
1399 /* Reset search origin. */
1400 k
= (splay_tree_key
) ptr_low
;
1406 n
= (direction
== 0 ? splay_tree_successor (t
, k
) : splay_tree_predecessor (t
, k
));
1407 if (n
== NULL
) break;
1408 obj
= (__mf_object_t
*) n
->value
;
1410 if (! (obj
->low
<= ptr_high
&& obj
->high
>= ptr_low
)) /* No overlap? */
1413 if (count
< max_objs
)
1414 objs
[count
] = (__mf_object_t
*) n
->value
;
1417 k
= (splay_tree_key
) obj
->low
;
1426 __mf_find_objects (uintptr_t ptr_low
, uintptr_t ptr_high
,
1427 __mf_object_t
**objs
, unsigned max_objs
)
1432 /* Search each splay tree for overlaps. */
1433 for (type
= __MF_TYPE_NOACCESS
; type
<= __MF_TYPE_GUESS
; type
++)
1435 unsigned c
= __mf_find_objects2 (ptr_low
, ptr_high
, objs
, max_objs
, type
);
1441 else /* NB: C may equal 0 */
1454 /* __mf_link_object */
1457 __mf_link_object (__mf_object_t
*node
)
1459 splay_tree t
= __mf_object_tree (node
->type
);
1460 splay_tree_insert (t
, (splay_tree_key
) node
->low
, (splay_tree_value
) node
);
1463 /* __mf_unlink_object */
1466 __mf_unlink_object (__mf_object_t
*node
)
1468 splay_tree t
= __mf_object_tree (node
->type
);
1469 splay_tree_remove (t
, (splay_tree_key
) node
->low
);
1472 /* __mf_find_dead_objects */
1474 /* Find overlapping dead objecs between [low,high]. Return up to
1475 max_objs of their pointers in objs[]. Return total count of
1476 overlaps (may exceed max_objs). */
1479 __mf_find_dead_objects (uintptr_t low
, uintptr_t high
,
1480 __mf_object_t
**objs
, unsigned max_objs
)
1482 if (__mf_opts
.persistent_count
> 0)
1485 unsigned recollection
= 0;
1488 assert (low
<= high
);
1489 assert (max_objs
== 0 || objs
!= NULL
);
1491 /* Widen the search from the most recent plots in each row, looking
1492 backward in time. */
1494 while (recollection
< __mf_opts
.persistent_count
)
1498 for (row
= 0; row
<= __MF_TYPE_MAX_CEM
; row
++)
1503 plot
= __mf_object_dead_head
[row
];
1504 for (i
= 0; i
<= recollection
; i
++)
1508 /* Look backward through row: it's a circular buffer. */
1509 if (plot
> 0) plot
--;
1510 else plot
= __mf_opts
.persistent_count
- 1;
1512 obj
= __mf_object_cemetary
[row
][plot
];
1513 if (obj
&& obj
->low
<= high
&& obj
->high
>= low
)
1515 /* Found an overlapping dead object! */
1516 if (count
< max_objs
)
1526 /* Look farther back in time. */
1527 recollection
= (recollection
* 2) + 1;
1536 /* __mf_describe_object */
1539 __mf_describe_object (__mf_object_t
*obj
)
1541 static unsigned epoch
= 0;
1548 if (__mf_opts
.abbreviate
&& obj
->description_epoch
== epoch
)
1551 "mudflap object %p: name=`%s'\n",
1552 (void *) obj
, (obj
->name
? obj
->name
: ""));
1556 obj
->description_epoch
= epoch
;
1559 "mudflap object %p: name=`%s'\n"
1560 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1561 "alloc time=%lu.%06lu pc=%p"
1566 (void *) obj
, (obj
->name
? obj
->name
: ""),
1567 (void *) obj
->low
, (void *) obj
->high
,
1568 (unsigned long) (obj
->high
- obj
->low
+ 1),
1569 (obj
->type
== __MF_TYPE_NOACCESS
? "no-access" :
1570 obj
->type
== __MF_TYPE_HEAP
? "heap" :
1571 obj
->type
== __MF_TYPE_HEAP_I
? "heap-init" :
1572 obj
->type
== __MF_TYPE_STACK
? "stack" :
1573 obj
->type
== __MF_TYPE_STATIC
? "static" :
1574 obj
->type
== __MF_TYPE_GUESS
? "guess" :
1576 obj
->read_count
, obj
->write_count
, obj
->liveness
,
1577 obj
->watching_p
? " watching" : "",
1578 obj
->alloc_time
.tv_sec
, obj
->alloc_time
.tv_usec
,
1579 (void *) obj
->alloc_pc
1581 , (unsigned) obj
->alloc_thread
1585 if (__mf_opts
.backtrace
> 0)
1588 for (i
=0; i
<obj
->alloc_backtrace_size
; i
++)
1589 fprintf (stderr
, " %s\n", obj
->alloc_backtrace
[i
]);
1592 if (__mf_opts
.persistent_count
> 0)
1594 if (obj
->deallocated_p
)
1596 fprintf (stderr
, "dealloc time=%lu.%06lu pc=%p"
1601 obj
->dealloc_time
.tv_sec
, obj
->dealloc_time
.tv_usec
,
1602 (void *) obj
->dealloc_pc
1604 , (unsigned) obj
->dealloc_thread
1609 if (__mf_opts
.backtrace
> 0)
1612 for (i
=0; i
<obj
->dealloc_backtrace_size
; i
++)
1613 fprintf (stderr
, " %s\n", obj
->dealloc_backtrace
[i
]);
1621 __mf_report_leaks_fn (splay_tree_node n
, void *param
)
1623 __mf_object_t
*node
= (__mf_object_t
*) n
->value
;
1624 unsigned *count
= (unsigned *) param
;
1629 fprintf (stderr
, "Leaked object %u:\n", (*count
));
1630 __mf_describe_object (node
);
1637 __mf_report_leaks ()
1641 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP
),
1642 __mf_report_leaks_fn
, & count
);
1643 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I
),
1644 __mf_report_leaks_fn
, & count
);
1649 /* ------------------------------------------------------------------------ */
1656 BEGIN_RECURSION_PROTECT ();
1658 END_RECURSION_PROTECT ();
1665 if (__mf_opts
.collect_stats
)
1670 "calls to __mf_check: %lu\n"
1671 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1672 " __mf_unregister: %lu [%luB]\n"
1673 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1675 __mf_count_register
,
1676 __mf_total_register_size
[0], __mf_total_register_size
[1],
1677 __mf_total_register_size
[2], __mf_total_register_size
[3],
1678 __mf_total_register_size
[4], /* XXX */
1679 __mf_count_unregister
, __mf_total_unregister_size
,
1680 __mf_count_violation
[0], __mf_count_violation
[1],
1681 __mf_count_violation
[2], __mf_count_violation
[3],
1682 __mf_count_violation
[4]);
1685 "calls with reentrancy: %lu\n", __mf_reentrancy
);
1688 " lock contention: %lu\n", __mf_lock_contention
);
1691 /* Lookup cache stats. */
1694 unsigned max_reuse
= 0;
1695 unsigned num_used
= 0;
1696 unsigned num_unused
= 0;
1698 for (i
= 0; i
< LOOKUP_CACHE_SIZE
; i
++)
1700 if (__mf_lookup_cache_reusecount
[i
])
1704 if (max_reuse
< __mf_lookup_cache_reusecount
[i
])
1705 max_reuse
= __mf_lookup_cache_reusecount
[i
];
1707 fprintf (stderr
, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1708 num_used
, num_unused
, max_reuse
);
1712 unsigned live_count
;
1713 live_count
= __mf_find_objects (MINPTR
, MAXPTR
, NULL
, 0);
1714 fprintf (stderr
, "number of live objects: %u\n", live_count
);
1717 if (__mf_opts
.persistent_count
> 0)
1719 unsigned dead_count
= 0;
1721 for (row
= 0; row
<= __MF_TYPE_MAX_CEM
; row
++)
1722 for (plot
= 0 ; plot
< __mf_opts
.persistent_count
; plot
++)
1723 if (__mf_object_cemetary
[row
][plot
] != 0)
1725 fprintf (stderr
, " zombie objects: %u\n", dead_count
);
1728 if (__mf_opts
.print_leaks
&& (__mf_opts
.mudflap_mode
== mode_check
))
1731 extern void * __mf_wrap_alloca_indirect (size_t c
);
1733 /* Free up any remaining alloca()'d blocks. */
1734 __mf_wrap_alloca_indirect (0);
1735 __mf_describe_object (NULL
); /* Reset description epoch. */
1736 l
= __mf_report_leaks ();
1737 fprintf (stderr
, "number of leaked objects: %u\n", l
);
1741 /* __mf_backtrace */
1744 __mf_backtrace (char ***symbols
, void *guess_pc
, unsigned guess_omit_levels
)
1747 unsigned pc_array_size
= __mf_opts
.backtrace
+ guess_omit_levels
;
1748 unsigned remaining_size
;
1749 unsigned omitted_size
= 0;
1751 DECLARE (void, free
, void *ptr
);
1752 DECLARE (void *, calloc
, size_t c
, size_t n
);
1753 DECLARE (void *, malloc
, size_t n
);
1755 pc_array
= CALL_REAL (calloc
, pc_array_size
, sizeof (void *) );
1756 #ifdef HAVE_BACKTRACE
1757 pc_array_size
= backtrace (pc_array
, pc_array_size
);
1759 #define FETCH(n) do { if (pc_array_size >= n) { \
1760 pc_array[n] = __builtin_return_address(n); \
1761 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1763 /* Unroll some calls __builtin_return_address because this function
1764 only takes a literal integer parameter. */
1767 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1768 rather than simply returning 0. :-( */
1777 if (pc_array_size
> 8) pc_array_size
= 9;
1779 if (pc_array_size
> 0) pc_array_size
= 1;
1785 /* We want to trim the first few levels of the stack traceback,
1786 since they contain libmudflap wrappers and junk. If pc_array[]
1787 ends up containing a non-NULL guess_pc, then trim everything
1788 before that. Otherwise, omit the first guess_omit_levels
1791 if (guess_pc
!= NULL
)
1792 for (i
=0; i
<pc_array_size
; i
++)
1793 if (pc_array
[i
] == guess_pc
)
1796 if (omitted_size
== 0) /* No match? */
1797 if (pc_array_size
> guess_omit_levels
)
1798 omitted_size
= guess_omit_levels
;
1800 remaining_size
= pc_array_size
- omitted_size
;
1802 #ifdef HAVE_BACKTRACE_SYMBOLS
1803 *symbols
= backtrace_symbols (pc_array
+ omitted_size
, remaining_size
);
1806 /* Let's construct a buffer by hand. It will have <remaining_size>
1807 char*'s at the front, pointing at individual strings immediately
1812 enum { perline
= 30 };
1813 buffer
= CALL_REAL (malloc
, remaining_size
* (perline
+ sizeof(char *)));
1814 pointers
= (char **) buffer
;
1815 chars
= (char *)buffer
+ (remaining_size
* sizeof (char *));
1816 for (i
= 0; i
< remaining_size
; i
++)
1818 pointers
[i
] = chars
;
1819 sprintf (chars
, "[0x%p]", pc_array
[omitted_size
+ i
]);
1820 chars
= chars
+ perline
;
1822 *symbols
= pointers
;
1825 CALL_REAL (free
, pc_array
);
1827 return remaining_size
;
1830 /* ------------------------------------------------------------------------ */
1831 /* __mf_violation */
1834 __mf_violation (void *ptr
, size_t sz
, uintptr_t pc
,
1835 const char *location
, int type
)
1838 static unsigned violation_number
;
1839 DECLARE(void, free
, void *ptr
);
1841 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1843 (location
!= NULL
? location
: ""), type
, ptr
, (unsigned long) sz
);
1845 if (__mf_opts
.collect_stats
)
1846 __mf_count_violation
[(type
< 0) ? 0 :
1847 (type
> __MF_VIOL_WATCH
) ? 0 :
1850 /* Print out a basic warning message. */
1851 if (__mf_opts
.verbose_violations
)
1854 unsigned num_helpful
= 0;
1855 struct timeval now
= { 0, 0 };
1856 #if HAVE_GETTIMEOFDAY
1857 gettimeofday (& now
, NULL
);
1860 violation_number
++;
1863 "mudflap violation %u (%s): time=%lu.%06lu "
1864 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1866 ((type
== __MF_VIOL_READ
) ? "check/read" :
1867 (type
== __MF_VIOL_WRITE
) ? "check/write" :
1868 (type
== __MF_VIOL_REGISTER
) ? "register" :
1869 (type
== __MF_VIOL_UNREGISTER
) ? "unregister" :
1870 (type
== __MF_VIOL_WATCH
) ? "watch" : "unknown"),
1871 now
.tv_sec
, now
.tv_usec
,
1872 (void *) ptr
, (unsigned long)sz
, (void *) pc
,
1873 (location
!= NULL
? " location=`" : ""),
1874 (location
!= NULL
? location
: ""),
1875 (location
!= NULL
? "'" : ""));
1877 if (__mf_opts
.backtrace
> 0)
1882 num
= __mf_backtrace (& symbols
, (void *) pc
, 2);
1883 /* Note: backtrace_symbols calls malloc(). But since we're in
1884 __mf_violation and presumably __mf_check, it'll detect
1885 recursion, and not put the new string into the database. */
1887 for (i
=0; i
<num
; i
++)
1888 fprintf (stderr
, " %s\n", symbols
[i
]);
1890 /* Calling free() here would trigger a violation. */
1891 CALL_REAL(free
, symbols
);
1895 /* Look for nearby objects. For this, we start with s_low/s_high
1896 pointing to the given area, looking for overlapping objects.
1897 If none show up, widen the search area and keep looking. */
1899 if (sz
== 0) sz
= 1;
1901 for (dead_p
= 0; dead_p
<= 1; dead_p
++) /* for dead_p in 0 1 */
1903 enum {max_objs
= 3}; /* magic */
1904 __mf_object_t
*objs
[max_objs
];
1905 unsigned num_objs
= 0;
1906 uintptr_t s_low
, s_high
;
1910 s_low
= (uintptr_t) ptr
;
1911 s_high
= CLAMPSZ (ptr
, sz
);
1913 while (tries
< 16) /* magic */
1916 num_objs
= __mf_find_dead_objects (s_low
, s_high
, objs
, max_objs
);
1918 num_objs
= __mf_find_objects (s_low
, s_high
, objs
, max_objs
);
1920 if (num_objs
) /* good enough */
1925 /* XXX: tune this search strategy. It's too dependent on
1926 sz, which can vary from 1 to very big (when array index
1927 checking) numbers. */
1928 s_low
= CLAMPSUB (s_low
, (sz
* tries
* tries
));
1929 s_high
= CLAMPADD (s_high
, (sz
* tries
* tries
));
1932 for (i
= 0; i
< min (num_objs
, max_objs
); i
++)
1934 __mf_object_t
*obj
= objs
[i
];
1935 uintptr_t low
= (uintptr_t) ptr
;
1936 uintptr_t high
= CLAMPSZ (ptr
, sz
);
1937 unsigned before1
= (low
< obj
->low
) ? obj
->low
- low
: 0;
1938 unsigned after1
= (low
> obj
->high
) ? low
- obj
->high
: 0;
1939 unsigned into1
= (high
>= obj
->low
&& low
<= obj
->high
) ? low
- obj
->low
: 0;
1940 unsigned before2
= (high
< obj
->low
) ? obj
->low
- high
: 0;
1941 unsigned after2
= (high
> obj
->high
) ? high
- obj
->high
: 0;
1942 unsigned into2
= (high
>= obj
->low
&& low
<= obj
->high
) ? high
- obj
->low
: 0;
1944 fprintf (stderr
, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
1945 num_helpful
+ i
+ 1,
1946 (before1
? before1
: after1
? after1
: into1
),
1947 (before1
? "before" : after1
? "after" : "into"),
1948 (before2
? before2
: after2
? after2
: into2
),
1949 (before2
? "before" : after2
? "after" : "into"));
1950 __mf_describe_object (obj
);
1952 num_helpful
+= num_objs
;
1955 fprintf (stderr
, "number of nearby objects: %u\n", num_helpful
);
1958 /* How to finally handle this violation? */
1959 switch (__mf_opts
.violation_mode
)
1964 kill (getpid(), SIGSEGV
);
1971 snprintf (buf
, 128, "gdb --pid=%u", (unsigned) getpid ());
1973 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
1974 instead, and let the forked child execlp() gdb. That way, this
1975 subject process can be resumed under the supervision of gdb.
1976 This can't happen now, since system() only returns when gdb
1977 dies. In that case, we need to beware of starting a second
1978 concurrent gdb child upon the next violation. (But if the first
1979 gdb dies, then starting a new one is appropriate.) */
1984 /* ------------------------------------------------------------------------ */
1987 unsigned __mf_watch (void *ptr
, size_t sz
)
1991 BEGIN_RECURSION_PROTECT ();
1992 rc
= __mf_watch_or_not (ptr
, sz
, 1);
1993 END_RECURSION_PROTECT ();
1998 unsigned __mf_unwatch (void *ptr
, size_t sz
)
2002 rc
= __mf_watch_or_not (ptr
, sz
, 0);
2009 __mf_watch_or_not (void *ptr
, size_t sz
, char flag
)
2011 uintptr_t ptr_high
= CLAMPSZ (ptr
, sz
);
2012 uintptr_t ptr_low
= (uintptr_t) ptr
;
2015 TRACE ("%s ptr=%p size=%lu\n",
2016 (flag
? "watch" : "unwatch"), ptr
, (unsigned long) sz
);
2018 switch (__mf_opts
.mudflap_mode
)
2028 __mf_object_t
**all_ovr_objs
;
2031 DECLARE (void *, malloc
, size_t c
);
2032 DECLARE (void, free
, void *p
);
2034 obj_count
= __mf_find_objects (ptr_low
, ptr_high
, NULL
, 0);
2035 VERBOSE_TRACE (" %u:", obj_count
);
2037 all_ovr_objs
= CALL_REAL (malloc
, (sizeof (__mf_object_t
*) * obj_count
));
2038 if (all_ovr_objs
== NULL
) abort ();
2039 n
= __mf_find_objects (ptr_low
, ptr_high
, all_ovr_objs
, obj_count
);
2040 assert (n
== obj_count
);
2042 for (n
= 0; n
< obj_count
; n
++)
2044 __mf_object_t
*obj
= all_ovr_objs
[n
];
2046 VERBOSE_TRACE (" [%p]", (void *) obj
);
2047 if (obj
->watching_p
!= flag
)
2049 obj
->watching_p
= flag
;
2052 /* Remove object from cache, to ensure next access
2053 goes through __mf_check(). */
2055 __mf_uncache_object (obj
);
2058 CALL_REAL (free
, all_ovr_objs
);
2068 __mf_sigusr1_handler (int num
)
2070 __mf_sigusr1_received
++;
2073 /* Install or remove SIGUSR1 handler as necessary.
2074 Also, respond to a received pending SIGUSR1. */
2076 __mf_sigusr1_respond ()
2078 static int handler_installed
;
2081 /* Manage handler */
2082 if (__mf_opts
.sigusr1_report
&& ! handler_installed
)
2084 signal (SIGUSR1
, __mf_sigusr1_handler
);
2085 handler_installed
= 1;
2087 else if(! __mf_opts
.sigusr1_report
&& handler_installed
)
2089 signal (SIGUSR1
, SIG_DFL
);
2090 handler_installed
= 0;
2094 /* Manage enqueued signals */
2095 if (__mf_sigusr1_received
> __mf_sigusr1_handled
)
2097 __mf_sigusr1_handled
++;
2098 assert (__mf_state
== reentrant
);
2100 handler_installed
= 0; /* We may need to re-enable signal; this might be a SysV library. */
2105 /* XXX: provide an alternative __assert_fail function that cannot
2106 fail due to libmudflap infinite recursion. */
2110 write_itoa (int fd
, unsigned n
)
2112 enum x
{ bufsize
= sizeof(n
)*4 };
2116 for (i
=0; i
<bufsize
-1; i
++)
2118 unsigned digit
= n
% 10;
2119 buf
[bufsize
-2-i
] = digit
+ '0';
2123 char *m
= & buf
[bufsize
-2-i
];
2124 buf
[bufsize
-1] = '\0';
2125 write (fd
, m
, strlen(m
));
2133 __assert_fail (const char *msg
, const char *file
, unsigned line
, const char *func
)
2135 #define write2(string) write (2, (string), strlen ((string)));
2139 write_itoa (2, (unsigned) pthread_self ());
2142 write2(": assertion failure: `");
2143 write (2, msg
, strlen (msg
));
2145 write (2, func
, strlen (func
));
2147 write (2, file
, strlen (file
));
2149 write_itoa (2, line
);
2162 /* #include the generic splay tree implementation from libiberty here, to
2163 ensure that it uses our memory allocation primitives. */
2166 splay_tree_free (void *p
)
2168 DECLARE (void, free
, void *p
);
2169 CALL_REAL (free
, p
);
2173 splay_tree_xmalloc (size_t s
)
2175 DECLARE (void *, malloc
, size_t s
);
2176 return CALL_REAL (malloc
, s
);
2179 #define free(z) splay_tree_free(z)
2180 #define xmalloc(z) splay_tree_xmalloc(z)
2181 #include "splay-tree.c"