/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define __EXTENSIONS__
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <sys/types.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif

#include <sys/types.h>

#include "mf-runtime.h"
/* ------------------------------------------------------------------------ */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))

/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5
/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant; \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
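/* Illustrative sketch (not part of the runtime proper): every public
   entry point brackets its unlocked worker with this pair, e.g.

     void __mf_check (void *ptr, size_t sz, int type, const char *location)
     {
       LOCKTH ();
       BEGIN_RECURSION_PROTECT ();
       __mfu_check (ptr, sz, type, location);
       END_RECURSION_PROTECT ();
       UNLOCKTH ();
     }

   so that any runtime-internal call back into the runtime (say, from a
   wrapped libc function) is reported as erroneous reentrancy rather
   than looping forever.  The same pattern appears verbatim in
   __mf_check, __mf_register, and __mf_unregister below.  */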
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
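/* Illustrative sketch (not part of this file): __MF_CACHE_INDEX (from
   mf-runtime.h) hashes an address into this direct-mapped cache along
   the lines of

     idx = ((uintptr_t) ptr >> __mf_lc_shift) & __mf_lc_mask;

   and a hit means the cached interval already brackets the access, so
   the object tree need not be searched:

     struct __mf_cache *elem = & __mf_lookup_cache [idx];
     if (elem->low <= (uintptr_t) ptr
         && (uintptr_t) ptr + sz - 1 <= elem->high)
       return;   -- fast path, no tree lookup

   __mf_adapt_cache below retunes the shift and mask at runtime.  */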
struct __mf_options __mf_opts;

int __mf_starting_p = 1;

enum __mf_state_enum __mf_state = active;

/* See __mf_state_perthread() in mf-hooks.c. */

pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#pragma weak pthread_join
const void *threads_active_p = (void *) pthread_join;
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_treerot_left, __mf_treerot_right;
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
/* not static */ unsigned long __mf_lock_contention;
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
  pthread_t alloc_thread;

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
  pthread_t dealloc_thread;
} __mf_object_t;

typedef struct __mf_object_tree
{
  __mf_object_t data;
  struct __mf_object_tree *left;
  struct __mf_object_tree *right;
} __mf_object_tree_t;
/* Live objects: binary tree on __mf_object_t.low */
static __mf_object_tree_t *__mf_object_root;

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head [__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_tree_t *__mf_object_cemetary [__MF_TYPE_MAX_CEM+1] [__MF_PERSIST_MAX];
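/* Illustrative sketch (not part of this file): each cemetary row is a
   circular buffer indexed by __mf_object_dead_head[row].  Burying an
   object evicts the previous occupant of the plot, exactly as
   __mfu_unregister does below:

     unsigned row = obj->data.type;
     unsigned plot = __mf_object_dead_head [row];
     evicted = __mf_object_cemetary [row][plot];
     __mf_object_cemetary [row][plot] = obj;
     plot ++;
     if (plot == __mf_opts.persistent_count) plot = 0;
     __mf_object_dead_head [row] = plot;

   so the row always holds the persistent_count most recently
   unregistered objects of that type.  */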
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

static void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_tree_t **objs, unsigned max_objs);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_tree_t **objs, unsigned max_objs);
static void __mf_link_object (__mf_object_tree_t *obj);
static void __mf_age_tree (__mf_object_tree_t *obj);
static void __mf_adapt_cache ();
static void __mf_unlink_object (__mf_object_tree_t *obj);
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.tree_aging = 13037;
  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
266 "mudflaps do nothing",
267 set_option
, (int)mode_nop
, (int *)&__mf_opts
.mudflap_mode
},
269 "mudflaps populate object tree",
270 set_option
, (int)mode_populate
, (int *)&__mf_opts
.mudflap_mode
},
272 "mudflaps check for memory violations",
273 set_option
, (int)mode_check
, (int *)&__mf_opts
.mudflap_mode
},
275 "mudflaps always cause violations (diagnostic)",
276 set_option
, (int)mode_violate
, (int *)&__mf_opts
.mudflap_mode
},
279 "violations do not change program execution",
280 set_option
, (int)viol_nop
, (int *)&__mf_opts
.violation_mode
},
282 "violations cause a call to abort()",
283 set_option
, (int)viol_abort
, (int *)&__mf_opts
.violation_mode
},
285 "violations are promoted to SIGSEGV signals",
286 set_option
, (int)viol_segv
, (int *)&__mf_opts
.violation_mode
},
288 "violations fork a gdb process attached to current program",
289 set_option
, (int)viol_gdb
, (int *)&__mf_opts
.violation_mode
},
291 "trace calls to mudflap runtime library",
292 set_option
, 1, &__mf_opts
.trace_mf_calls
},
294 "trace internal events within mudflap runtime library",
295 set_option
, 1, &__mf_opts
.verbose_trace
},
297 "collect statistics on mudflap's operation",
298 set_option
, 1, &__mf_opts
.collect_stats
},
301 "print report upon SIGUSR1",
302 set_option
, 1, &__mf_opts
.sigusr1_report
},
304 {"internal-checking",
305 "perform more expensive internal checking",
306 set_option
, 1, &__mf_opts
.internal_checking
},
308 "age the object tree after N accesses for working set",
309 read_integer_option
, 1000000, &__mf_opts
.tree_aging
},
311 "print any memory leaks at program shutdown",
312 set_option
, 1, &__mf_opts
.print_leaks
},
313 {"check-initialization",
314 "detect uninitialized object reads",
315 set_option
, 1, &__mf_opts
.check_initialization
},
316 {"verbose-violations",
317 "print verbose messages when memory violations occur",
318 set_option
, 1, &__mf_opts
.verbose_violations
},
320 "abbreviate repetitive listings",
321 set_option
, 1, &__mf_opts
.abbreviate
},
323 "wipe stack objects at unwind",
324 set_option
, 1, &__mf_opts
.wipe_stack
},
326 "wipe heap objects at free",
327 set_option
, 1, &__mf_opts
.wipe_heap
},
329 "support /proc/self/map heuristics",
330 set_option
, 1, &__mf_opts
.heur_proc_map
},
332 "enable a simple upper stack bound heuristic",
333 set_option
, 1, &__mf_opts
.heur_stack_bound
},
335 "support _start.._end heuristics",
336 set_option
, 1, &__mf_opts
.heur_start_end
},
338 "register standard library data (argv, errno, stdin, ...)",
339 set_option
, 1, &__mf_opts
.heur_std_data
},
340 {"free-queue-length",
341 "queue N deferred free() calls before performing them",
342 read_integer_option
, 0, &__mf_opts
.free_queue_length
},
344 "keep a history of N unregistered regions",
345 read_integer_option
, 0, &__mf_opts
.persistent_count
},
347 "surround allocations with crumple zones of N bytes",
348 read_integer_option
, 0, &__mf_opts
.crumple_zone
},
349 /* XXX: not type-safe.
351 "set lookup cache size mask to N (2**M - 1)",
352 read_integer_option, 0, (int *)(&__mf_lc_mask)},
354 "set lookup cache pointer shift",
355 read_integer_option, 0, (int *)(&__mf_lc_shift)},
358 "adapt mask/shift parameters after N cache misses",
359 read_integer_option
, 1, &__mf_opts
.adapt_cache
},
361 "keep an N-level stack trace of each call context",
362 read_integer_option
, 0, &__mf_opts
.backtrace
},
365 "override thread stacks allocation: N kB",
366 read_integer_option
, 0, &__mf_opts
.thread_stack
},
368 {0, 0, set_option
, 0, NULL
}
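/* Illustrative sketch (not part of this file): a MUDFLAP_OPTIONS value
   is parsed against this table by __mfu_set_options() below, e.g.

     $ export MUDFLAP_OPTIONS='-mode-check -viol-gdb -backtrace=8 -no-heur-stdlib'
     $ ./a.out

   where `-OPTION' sets a flag, `-OPTION=N' reads an integer, and
   `-no-OPTION' negates; the authoritative option names are the ones in
   the table above.  */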
377 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
378 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
380 "The mudflap code can be controlled by an environment variable:\n"
382 "$ export MUDFLAP_OPTIONS='<options>'\n"
383 "$ <mudflapped_program>\n"
385 "where <options> is a space-separated list of \n"
386 "any of the following options. Use `-no-OPTION' to disable options.\n"
389 (threads_active_p
? "multi-threaded " : "single-threaded "),
399 /* XXX: The multi-threaded thread-unaware combination is bad. */
401 for (opt
= options
; opt
->name
; opt
++)
403 int default_p
= (opt
->value
== * opt
->target
);
409 fprintf (stderr
, "-%-23.23s %s", opt
->name
, opt
->description
);
411 fprintf (stderr
, " [active]\n");
413 fprintf (stderr
, "\n");
415 case read_integer_option
:
416 strncpy (buf
, opt
->name
, 128);
417 strncpy (buf
+ strlen (opt
->name
), "=N", 2);
418 fprintf (stderr
, "-%-23.23s %s", buf
, opt
->description
);
419 fprintf (stderr
, " [%d]\n", * opt
->target
);
425 fprintf (stderr
, "\n");
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr)
        {
        case ' ':
        case '\t':
        case '\n':
          optstr++;
          break;

        case '-':
          if (*optstr+1)
            {
              int negate = 0;
              optstr++;

              if (*optstr == '?' ||
                  strncmp (optstr, "help", 4) == 0)
                {
                  /* Caller will print help and exit.  */
                  return -1;
                }

              if (strncmp (optstr, "no-", 3) == 0)
                {
                  negate = 1;
                  optstr = & optstr[3];
                }

              for (opts = options; opts->name; opts++)
                {
                  if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                    {
                      optstr += strlen (opts->name);
                      assert (opts->target);
                      switch (opts->type)
                        {
                        case set_option:
                          if (negate)
                            *(opts->target) = 0;
                          else
                            *(opts->target) = opts->value;
                          break;
                        case read_integer_option:
                          if (! negate && (*optstr == '=' && *(optstr+1)))
                            {
                              char *nxt;
                              long int tmp;
                              optstr++;
                              tmp = strtol (optstr, &nxt, 10);
                              if ((optstr != nxt) && (tmp != LONG_MAX))
                                {
                                  optstr = nxt;
                                  *(opts->target) = (int)tmp;
                                }
                            }
                          break;
                        }
                      break;
                    }
                }
            }
          break;

        default:
          fprintf (stderr,
                   "warning: unrecognized string '%s' in mudflap options\n",
                   optstr);
          optstr += strlen (optstr);
          break;
        }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
static void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e != NULL);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();
  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}
static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic [i]);
}

/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};
/* ------------------------------------------------------------------------ */

static void
__mf_init ()
{
  char *ov = 0;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
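/* Illustrative note (an assumption, not stated in this file): the
   __wrap_main/__real_main pair relies on the GNU ld --wrap feature,
   i.e. a link line along the lines of

     gcc -fmudflap test.c -lmudflap -Wl,--wrap,main

   under which unresolved references to main() resolve to __wrap_main()
   above, and __real_main() resolves to the program's own main().  */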
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;
    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        if (__mf_object_root)
          {
            static unsigned aging_count;
            static unsigned adapt_count;
            aging_count ++;
            adapt_count ++;
            if (UNLIKELY (__mf_opts.tree_aging > 0 &&
                          aging_count > __mf_opts.tree_aging))
              {
                aging_count = 0;
                __mf_age_tree (__mf_object_root);
              }
            if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                          adapt_count > __mf_opts.adapt_cache))
              {
                adapt_count = 0;
                __mf_adapt_cache ();
              }
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            __mf_object_tree_t* ovr_obj[1];
            unsigned obj_count;

            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);

            if (LIKELY (obj_count == 1)) /* A single hit! */
              {
                __mf_object_t *obj = & ovr_obj[0]->data;
                assert (obj != NULL);
                if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                  {
                    /* XXX: hope for no overflow!  */
                    if (type == __MF_CHECK_READ)
                      obj->read_count ++;
                    else
                      obj->write_count ++;
                    obj->liveness ++;

                    if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                      judgement = -1;
                    else if (UNLIKELY (obj->watching_p))
                      judgement = -2; /* trigger VIOL_WATCH */
                    else if (UNLIKELY (__mf_opts.check_initialization
                                       && type == __MF_CHECK_READ
                                       && obj->write_count == 0
                                       /* uninitialized (heap) */
                                       && obj->type == __MF_TYPE_HEAP))
                      judgement = -1;
                    else
                      {
                        /* Valid access.  */
                        entry->low = obj->low;
                        entry->high = obj->high;
                        judgement = 1;
                      }
                  }
                /* The object did not cover the entire accessed region.  */
              }
            else if (LIKELY (obj_count > 1))
              {
                __mf_object_tree_t **all_ovr_objs;
                unsigned n;
                DECLARE (void *, malloc, size_t c);
                DECLARE (void, free, void *p);

                all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
                                                   obj_count));
                if (all_ovr_objs == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
                assert (n == obj_count);

                /* Confirm that accessed range is covered by first/last object. */
                if (LIKELY ((ptr_low >= all_ovr_objs[0]->data.low) &&
                            (ptr_high <= all_ovr_objs[obj_count-1]->data.high)))
                  {
                    /* Presume valid access. */
                    judgement = 1;

                    /* Confirm that intermediate objects are
                       contiguous and share a single name.  Thus they
                       are likely split up GUESS regions, or mmap
                       pages.  The idea of the name check is to
                       prevent an oversize access to a
                       stack-registered object (followed by some GUESS
                       type) from being accepted as a hit.  */
                    for (n=0; n<obj_count-1; n++)
                      {
                        __mf_object_t *obj = & (all_ovr_objs[n]->data);
                        __mf_object_t *nextobj = & (all_ovr_objs[n+1]->data);

                        if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                          judgement = -1; /* Force error. */

                        if (UNLIKELY (judgement == 1 &&
                                      (obj->high + 1 != nextobj->low)))
                          judgement = 0; /* Cancel presumption. */

                        if (UNLIKELY (judgement == 1 &&
                                      (obj->name != nextobj->name)))
                          judgement = 0; /* Cancel presumption. */
                        /* NB: strcmp above is not necessary since the
                           same literal string pointer is normally
                           used when creating regions.  */

                        /* XXX: hope for no overflow!  */
                        if (type == __MF_CHECK_READ)
                          obj->read_count ++;
                        else
                          obj->write_count ++;
                        obj->liveness ++;
                      }

                    /* If the access is otherwise successful, check whether
                       any of the covered objects are being watched.  */
                    if (judgement > 0)
                      {
                        unsigned i;
                        for (i=0; i<obj_count; i++)
                          if (all_ovr_objs[i]->data.watching_p)
                            judgement = -2; /* trigger VIOL_WATCH */
                      }

                    /* Check for uninitialized reads.  */
                    if (judgement > 0 &&
                        __mf_opts.check_initialization &&
                        type == __MF_CHECK_READ)
                      {
                        unsigned i;
                        unsigned written_count = 0;

                        for (i=0; i<obj_count; i++)
                          {
                            __mf_object_t *obj = & all_ovr_objs[i]->data;

                            if (obj->write_count
                                || obj->type == __MF_TYPE_HEAP_I
                                || obj->type == __MF_TYPE_GUESS)
                              written_count ++;
                          }

                        /* Check for ALL pieces having been written-to.
                           XXX: should this be ANY instead?  */
                        if (written_count != obj_count)
                          judgement = -1;
                      }

                    /* Fill out the cache with the bounds of the first
                       object and the last object that covers this
                       cache line (== includes the same __MF_CACHE_INDEX).
                       This could let this cache line span *two* distinct
                       registered objects: a peculiar but reasonable
                       situation.  The cache line may not include the
                       entire object though.  */
                    if (judgement > 0)
                      {
                        unsigned i;
                        entry->low = all_ovr_objs[0]->data.low;
                        for (i=0; i<obj_count; i++)
                          {
                            uintptr_t high = all_ovr_objs[i]->data.high;
                            if (__MF_CACHE_INDEX (high) == entry_idx)
                              entry->high = high;
                          }
                      }
                  }

                CALL_REAL (free, all_ovr_objs);
              }
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
static __mf_object_tree_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_tree_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_tree_t));
  new_obj->data.low = low;
  new_obj->data.high = high;
  new_obj->data.type = type;
  new_obj->data.name = name;
  new_obj->data.alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  gettimeofday (& new_obj->data.alloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
  new_obj->data.alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->data.alloc_backtrace_size =
      __mf_backtrace (& new_obj->data.alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache. */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      unsigned idx_low = __MF_CACHE_INDEX (low);
      unsigned idx_high = __MF_CACHE_INDEX (high);
      unsigned i;
      for (i = idx_low; i <= idx_high; i++)
        {
          struct __mf_cache *entry = & __mf_lookup_cache [i];
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_tree_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        num_overlapping_objs = __mf_find_objects (low, high, ovr_objs, 1);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_tree_t *ovr_obj = ovr_objs[0];

            /* Quietly accept a single duplicate registration for
               static objects, since these may come from distinct
               compilation units.  */
            if (type == __MF_TYPE_STATIC
                && ovr_obj->data.type == __MF_TYPE_STATIC
                && ovr_obj->data.low == low
                && ovr_obj->data.high == high)
              {
                /* do nothing */
                VERBOSE_TRACE ("duplicate static reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->data.name ? ovr_obj->data.name : ""));
                break;
              }

            /* Quietly accept a single duplicate registration for
               guess objects too.  */
            if (type == __MF_TYPE_GUESS &&
                ovr_obj->data.type == __MF_TYPE_GUESS &&
                ovr_obj->data.low == low &&
                ovr_obj->data.high == high)
              {
                /* do nothing */
                VERBOSE_TRACE ("duplicate guess reg %p-%p\n",
                               (void *) low, (void *) high);
                break;
              }

            /* Quietly accept a new guess registration that overlaps
               at least one existing object.  Trim it down to size.  */
            else if (type == __MF_TYPE_GUESS)
              {
                /* We need to split this new GUESS region into some
                   smaller ones.  Or we may not need to insert it at
                   all if it is covered by the overlapping region.  */

                /* First, identify all the overlapping objects.  */
                __mf_object_tree_t **all_ovr_objs;
                unsigned num_ovr_objs, n;
                uintptr_t next_low;
                DECLARE (void *, malloc, size_t c);
                DECLARE (void, free, void *p);

                all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
                                                   num_overlapping_objs));
                if (all_ovr_objs == NULL) abort ();
                num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
                                                  num_overlapping_objs);
                assert (num_ovr_objs == num_overlapping_objs);

                VERBOSE_TRACE ("splitting guess %p-%p, # overlaps: %u\n",
                               (void *) low, (void *) high, num_ovr_objs);

                /* Add GUESS regions between the holes: before each
                   overlapping region.  */
                next_low = low;
                /* This makes use of the assumption that __mf_find_objects() returns
                   overlapping objects in an increasing sequence.  */
                for (n=0; n < min (num_ovr_objs, num_overlapping_objs); n++)
                  {
                    if (all_ovr_objs[n]->data.low > next_low) /* Gap? */
                      {
                        uintptr_t next_high = CLAMPSUB (all_ovr_objs[n]->data.low, 1);
                        __mfu_register ((void *) next_low, next_high-next_low+1,
                                        __MF_TYPE_GUESS, name);
                      }
                    next_low = CLAMPADD (all_ovr_objs[n]->data.high, 1);
                  }
                /* Add in any leftover room at the top.  */
                if (next_low <= high)
                  __mfu_register ((void *) next_low, high-next_low+1,
                                  __MF_TYPE_GUESS, name);

                /* XXX: future optimization: allow consecutive GUESS regions to
                   be glued together.  */
                CALL_REAL (free, all_ovr_objs);
                break;
              }

            /* Quietly accept a non-GUESS region overlaying a GUESS
               region.  Handle it by removing the GUESS region
               temporarily, then recursively adding this new object,
               and then the GUESS back.  The latter will be split up
               by the recursive process above.  */
            else if (ovr_obj->data.type == __MF_TYPE_GUESS)
              {
                uintptr_t old_low = ovr_obj->data.low;
                uintptr_t old_high = ovr_obj->data.high;
                const char* old_name = ovr_obj->data.name;

                /* Now to recursively remove the guess piece, and
                   reinsert them in the opposite order.  Recursion
                   should bottom out if another non-GUESS overlapping
                   region is found for this new object (resulting in a
                   violation), or if no further overlap occurs.  The
                   located GUESS region should end up being split up
                   in any case.  */
                __mfu_unregister ((void *) old_low, old_high-old_low+1);
                __mfu_register ((void *) low, sz, type, name);
                __mfu_register ((void *) old_low, old_high-old_low+1,
                                __MF_TYPE_GUESS, old_name);
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }

        /* No overlapping objects: AOK.  */
        else
          {
            __mf_insert_new_object (low, high, type, name, pc);
          }

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
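/* Illustrative sketch (not part of this file): compiled code and user
   programs drive the runtime through the public entry points declared
   in mf-runtime.h, along these lines (hypothetical object):

     struct point { int x, y; } p;
     __mf_register (&p, sizeof (p), __MF_TYPE_STATIC, "p");
     __mf_check (&p.x, sizeof (p.x), __MF_CHECK_READ, "file.c:42 (main)");
     __mf_unregister (&p, sizeof (p));

   -fmudflap instrumentation emits equivalent calls automatically
   around pointer and array dereferences.  */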
void
__mf_unregister (void *ptr, size_t sz)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu\n", ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_tree_t *old_obj = NULL;
        __mf_object_tree_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_tree_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        /* Treat unknown size indication as 1.  */
        if (sz == 0) sz = 1;

        num_overlapping_objs = __mf_find_objects ((uintptr_t) ptr,
                                                  CLAMPSZ (ptr, sz), objs, 1);

        /* XXX: handle unregistration of big old GUESS region, that has since
           been splintered.  */
        old_obj = objs[0];

        if (UNLIKELY (num_overlapping_objs != 1 ||
                      (uintptr_t)ptr != old_obj->data.low)) /* XXX: what about sz? */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (& old_obj->data);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->data.type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->data.type == __MF_TYPE_HEAP
                                        || old_obj->data.type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->data.low,
                    0,
                    (size_t) (old_obj->data.high - old_obj->data.low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0 &&
            old_obj->data.type >= 0 &&
            old_obj->data.type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->data.deallocated_p = 1;
            old_obj->left = old_obj->right = NULL;
            old_obj->data.dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            gettimeofday (& old_obj->data.dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->data.dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->data.type == __MF_TYPE_HEAP)
              old_obj->data.dealloc_backtrace_size =
                __mf_backtrace (& old_obj->data.dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->data.description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->data.type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->data.read_count + old_obj->data.write_count) == 0 &&
                (old_obj->data.type == __MF_TYPE_HEAP
                 || old_obj->data.type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (& old_obj->data);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->data.alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->data.dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_validate_live_object_tree, _object_cemetary */

static void
__mf_validate_live_object_tree (__mf_object_tree_t *obj)
{
  assert (obj != NULL);

  if (__mf_opts.persistent_count > 0)
    assert (! obj->data.deallocated_p);

  if (obj->left)
    {
      assert (obj->left->data.high < obj->data.low);
      __mf_validate_live_object_tree (obj->left);
    }
  if (obj->right)
    {
      assert (obj->right->data.low > obj->data.high);
      __mf_validate_live_object_tree (obj->right);
    }
}

static void
__mf_validate_object_cemetary ()
{
  unsigned cls;
  unsigned i;

  for (cls = 0; cls <= __MF_TYPE_MAX_CEM; cls++)
    {
      assert (__mf_object_dead_head [cls] >= 0 &&
              __mf_object_dead_head [cls] < __mf_opts.persistent_count);
      for (i = 0; i < __mf_opts.persistent_count; i++)
        {
          __mf_object_tree_t *obj = __mf_object_cemetary [cls][i];
          if (obj)
            {
              assert (obj->data.deallocated_p);
              assert (obj->left == NULL);
              assert (obj->right == NULL);
            }
        }
    }
}

static void
__mf_validate_objects ()
{
  if (__mf_object_root)
    __mf_validate_live_object_tree (__mf_object_root);

  if (__mf_opts.persistent_count > 0)
    __mf_validate_object_cemetary ();
}
static void
__mf_age_tree (__mf_object_tree_t *obj)
{
  assert (obj != NULL);
  obj->data.liveness = obj->data.liveness >> 1;

  if (obj->left)
    __mf_age_tree (obj->left);
  if (obj->right)
    __mf_age_tree (obj->right);
}
struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};

static void
__mf_tree_analyze (__mf_object_tree_t *obj, struct tree_stats* s)
{
  assert (obj != NULL);

  if (obj->left)
    __mf_tree_analyze (obj->left, s);

  /* Exclude never-accessed objects.  */
  if (obj->data.read_count + obj->data.write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->data.high - obj->data.low + 1);

      if (obj->data.liveness)
        {
          unsigned i;
          uintptr_t addr;

          VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
                         (void *) obj->data.low, obj->data.liveness, obj->data.name);

          s->live_obj_count ++;
          s->total_weight += (double) obj->data.liveness;
          s->weighted_size +=
            (double) (obj->data.high - obj->data.low + 1) *
            (double) obj->data.liveness;

          addr = obj->data.low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->data.liveness;
              addr = addr >> 1;
            }
        }
    }

  if (obj->right)
    __mf_tree_analyze (obj->right, s);
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value = 0.0;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));
  if (__mf_object_root)
    __mf_tree_analyze (__mf_object_root, & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7; /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0x3ff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
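/* Illustrative sketch (not part of this file): for each address bit i,
   weighted_address_bits[i][0] and [i][1] accumulate the liveness of
   objects whose low address has bit i clear resp. set, so the product

     value(i) = weighted_address_bits[i][0] * weighted_address_bits[i][1]

   peaks for a bit that splits lively objects most evenly.  The
   smallest i whose product reaches 70% (the "shoulder") of that peak
   becomes the candidate __mf_lc_shift, smoothed over successive
   adaptations to avoid flapping.  */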
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

/* XXX: track traversal statistics, like average depth, balance.  */

static unsigned
__mf_find_objects_rec (uintptr_t low, uintptr_t high, __mf_object_tree_t **nodep,
                       __mf_object_tree_t **objs, unsigned max_objs)
{
  unsigned count = 0;
  __mf_object_tree_t *node = *nodep;

  assert (low <= high);
  assert (max_objs == 0 || objs != NULL);

  if (UNLIKELY (node == NULL)) return 0;

  /* Traverse down left subtree. */
  if (low < node->data.low)
    count += __mf_find_objects_rec (low, min(high, node->data.low),
                                    & node->left, objs, max_objs);

  /* Track the used slots of objs[].  */
  if (count < max_objs)
    {
      objs += count;
      max_objs -= count;
    }
  else
    max_objs = 0;

  /* Check for overlap with this node.  */
  if (high >= node->data.low && low <= node->data.high)
    {
      count ++;
      if (max_objs > 0) /* Any room left?  */
        {
          objs[0] = node;
          objs ++;
          max_objs --;
        }
    }

  /* Traverse down right subtree.  */
  if (high > node->data.high)
    count += __mf_find_objects_rec (max (low, node->data.high), high,
                                    & node->right, objs, max_objs);
  /* There is no need to manipulate objs/max_objs any further.  */

  /* Rotate a child node up if its access count is higher. */
  if (UNLIKELY ((node->left && node->left->data.liveness > node->data.liveness) &&
                ((!node->right || (node->right &&
                                   node->left->data.liveness >
                                   node->right->data.liveness)))))
    {
      __mf_object_tree_t *l = node->left;
      __mf_object_tree_t *l_r = l->right;

      *nodep = l;
      l->right = node;
      node->left = l_r;
      __mf_treerot_left ++;
    }
  else if (UNLIKELY ((node->right && node->right->data.liveness > node->data.liveness) &&
                     ((!node->left || (node->left &&
                                       node->right->data.liveness >
                                       node->left->data.liveness)))))
    {
      __mf_object_tree_t *r = node->right;
      __mf_object_tree_t *r_l = r->left;

      *nodep = r;
      r->left = node;
      node->right = r_l;
      __mf_treerot_right ++;
    }

  return count;
}

static unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_tree_t **objs, unsigned max_objs)
{
  if (UNLIKELY(__mf_opts.internal_checking))
    __mf_validate_objects ();

  return __mf_find_objects_rec (ptr_low, ptr_high, & __mf_object_root, objs, max_objs);
}
/* __mf_link_object */

static void
__mf_link_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
{
  __mf_object_tree_t *node = *link;

  assert (ptr != NULL);
  if (UNLIKELY(node == NULL))
    {
      *link = ptr;
      return;
    }

  if (ptr->data.high < node->data.low)
    return __mf_link_object2 (ptr, & node->left);
  else if (ptr->data.low > node->data.high)
    return __mf_link_object2 (ptr, & node->right);
  else
    abort (); /* XXX: duplicate object */
}

static void
__mf_link_object (__mf_object_tree_t *ptr)
{
  if (UNLIKELY(__mf_opts.internal_checking))
    __mf_validate_objects ();

  return __mf_link_object2 (ptr, & __mf_object_root);
}
/* __mf_unlink_object */

static void
__mf_unlink_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
{
  __mf_object_tree_t *node = *link;

  assert (ptr != NULL);
  if (UNLIKELY(node == ptr))
    {
      static unsigned promote_left_p = 0;
      promote_left_p = 1 - promote_left_p;

      /* Alternate promoting the left & right subtrees. */
      if (promote_left_p)
        {
          *link = ptr->left;
          if (ptr->right != NULL)
            __mf_link_object2 (ptr->right, link);
        }
      else
        {
          *link = ptr->right;
          if (ptr->left != NULL)
            __mf_link_object2 (ptr->left, link);
        }

      return;
    }

  if (ptr->data.high < node->data.low)
    return __mf_unlink_object2 (ptr, & node->left);
  else if (ptr->data.low > node->data.high)
    return __mf_unlink_object2 (ptr, & node->right);
  else
    abort (); /* XXX: missing object; should fail more gracefully. */
}

static void
__mf_unlink_object (__mf_object_tree_t *node)
{
  __mf_unlink_object2 (node, & __mf_object_root);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_tree_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i++)
                {
                  __mf_object_tree_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->data.low <= high && obj->data.high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    return 0;
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap object %p: name=`%s'\n",
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap object %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static unsigned
__mf_report_leaks (__mf_object_tree_t *node)
{
  /* The counter is shared amongst recursive calls, so
     that cumulative numbers are printed below.  */
  static unsigned count = 0;

  if (node == NULL) /* Reset */
    {
      count = 0;
      return 0;
    }

  /* Inorder traversal. */
  if (node->left)
    __mf_report_leaks (node->left);
  if (node->data.type == __MF_TYPE_HEAP
      || node->data.type == __MF_TYPE_HEAP_I)
    {
      count ++;
      fprintf (stderr, "Leaked object %u:\n", count);
      __mf_describe_object (& node->data);
    }
  if (node->right)
    __mf_report_leaks (node->right);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "mudflap stats:\n"
               "calls to __mf_check: %lu rot: %lu/%lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check, __mf_treerot_left, __mf_treerot_right,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }

        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      __mf_report_leaks (NULL); /* Reset cumulative count.  */
      l = __mf_report_leaks (__mf_object_root);
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

unsigned
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-( */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now;
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, "      %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_tree_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = & objs[i]->data;
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:
      snprintf (buf, 128, "gdb --pid=%d", getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_watch */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
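/* Illustrative sketch (not part of this file): a debugging session can
   mark a suspect buffer so that the next checked access to it raises a
   VIOL_WATCH report instead of passing silently (hypothetical buffer):

     char tag[4];
     __mf_register (tag, sizeof (tag), __MF_TYPE_STATIC, "tag");
     __mf_watch (tag, sizeof (tag));
     ...run the code under suspicion...
     __mf_unwatch (tag, sizeof (tag));

   Both calls return the number of registered objects whose watching_p
   flag actually changed.  */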
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_tree_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
                                           obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n++)
          {
            __mf_object_t *obj = & (all_ovr_objs[n]->data);

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
static void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
static void
__mf_sigusr1_respond ()
{
  static int handler_installed;

  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if(! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_state == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        break;
    }
  {
    char *m = & buf [bufsize-2-i];
    buf[bufsize-1] = '\0';
    write (fd, m, strlen(m));
  }
}
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
  abort ();
}