/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"

/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant; \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
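
/* Illustrative sketch (not part of the runtime): every locked public
   entry point below pairs these macros with the LOCKTH()/UNLOCKTH()
   macros from mf-impl.h, delegating the real work to an unlocked
   `__mfu_*' worker:

     void __mf_check (void *ptr, size_t sz, int type, const char *location)
     {
       LOCKTH ();
       BEGIN_RECURSION_PROTECT ();
       __mfu_check (ptr, sz, type, location);
       END_RECURSION_PROTECT ();
       UNLOCKTH ();
     }

   If the runtime re-enters itself while __mf_state is still `reentrant'
   (e.g. an instrumented call made from inside the runtime), the guard
   above reports the reentrancy with raw write(2) calls and aborts.  */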

/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
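
/* Worked example (illustrative, assuming the __MF_CACHE_INDEX
   definition from mf-runtime.h: ((uintptr_t)ptr >> __mf_lc_shift) &
   __mf_lc_mask): with the defaults shift=2 and mask=1023, a check of
   ptr = 0x804b01c maps to slot (0x804b01c >> 2) & 0x3ff == 0x007.
   The check is answered from the cache alone whenever that slot's
   [low,high] interval already brackets the access.  */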

struct __mf_options __mf_opts;

int __mf_starting_p = 1;
#ifndef LIBMUDFLAPTH
enum __mf_state_enum __mf_state = active;
#else
/* See __mf_state_perthread() in mf-hooks.c. */
#endif


#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below. */
#if HAVE_PTHREAD_H
#pragma weak pthread_join
const void *threads_active_p = (void *) pthread_join;
#endif

/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_treerot_left, __mf_treerot_right;
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif

/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;


typedef struct __mf_object_tree
{
  __mf_object_t data;
  struct __mf_object_tree *left;
  struct __mf_object_tree *right;
} __mf_object_tree_t;

/* Live objects: binary tree on __mf_object_t.low */
static __mf_object_tree_t *__mf_object_root;

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_tree_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
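
/* Illustrative sketch: each cemetary row is a fixed-size ring indexed
   by object type, with __mf_object_dead_head[row] pointing at the next
   plot to fill.  With -persistent-count=4, unregistering heap objects
   A, B, C, D, E leaves the row holding {E, B, C, D} with head == 1:
   the fifth burial recycled A's plot, and A became the del_obj that
   __mfu_unregister() frees for real.  */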

/* ------------------------------------------------------------------------ */
/* Forward function declarations */

static void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_tree_t **objs, unsigned max_objs);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_tree_t **objs, unsigned max_objs);
static void __mf_link_object (__mf_object_tree_t *obj);
static void __mf_age_tree (__mf_object_tree_t *obj);
static void __mf_adapt_cache ();
static void __mf_unlink_object (__mf_object_tree_t *obj);
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);

/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.tree_aging =    13037;
  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}

static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  int value;
  int *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"age-tree",
     "age the object tree after N accesses for working set",
     read_integer_option, 1000000, &__mf_opts.tree_aging},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
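
/* Example (illustrative): a mudflapped program might be run as

     $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-heur-stdlib'
     $ ./a.out

   __mfu_set_options() below matches each `-' token against the table
   above; a `no-' prefix clears a set_option, and a trailing `=N'
   supplies the integer for a read_integer_option.  */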

static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (threads_active_p ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }

      fprintf (stderr, "\n");
    }
}

int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        else
          optstr++; /* Skip a trailing lone `-'.  */
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}

#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */

/* ------------------------------------------------------------------------ */

void __mf_init ()
{
  char *ov = 0;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
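
/* Example (illustrative): the REG_RESERVED(__mf_lc_mask) call above
   expands to

     __mf_register (& __mf_lc_mask, sizeof(__mf_lc_mask),
                    __MF_TYPE_NOACCESS, "__mf_lc_mask");

   so an instrumented program that strays into the runtime's own
   bookkeeping data is reported as a violation rather than silently
   corrupting the checker.  */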

int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}

extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();
}

/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      entry->low = MINPTR;
      entry->high = MAXPTR;
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        if (__mf_object_root)
          {
            static unsigned aging_count;
            static unsigned adapt_count;
            aging_count ++;
            adapt_count ++;
            if (UNLIKELY (__mf_opts.tree_aging > 0 &&
                          aging_count > __mf_opts.tree_aging))
              {
                aging_count = 0;
                __mf_age_tree (__mf_object_root);
              }
            if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                          adapt_count > __mf_opts.adapt_cache))
              {
                adapt_count = 0;
                __mf_adapt_cache ();
              }
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            __mf_object_tree_t* ovr_obj[1];
            unsigned obj_count;

            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);

            if (LIKELY (obj_count == 1)) /* A single hit! */
              {
                __mf_object_t *obj = & ovr_obj[0]->data;
                assert (obj != NULL);
                if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                  {
                    /* XXX: hope for no overflow!  */
                    if (type == __MF_CHECK_READ)
                      obj->read_count ++;
                    else
                      obj->write_count ++;

                    obj->liveness ++;

                    if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                      judgement = -1;
                    else if (UNLIKELY (obj->watching_p))
                      judgement = -2; /* trigger VIOL_WATCH */
                    else if (UNLIKELY (__mf_opts.check_initialization
                                       /* reading */
                                       && type == __MF_CHECK_READ
                                       /* not written */
                                       && obj->write_count == 0
                                       /* uninitialized (heap) */
                                       && obj->type == __MF_TYPE_HEAP))
                      judgement = -1;
                    else
                      {
                        /* Valid access.  */
                        entry->low = obj->low;
                        entry->high = obj->high;
                        judgement = 1;
                      }
                  }
                /* The object did not cover the entire accessed region.  */
              }
            else if (LIKELY (obj_count > 1))
              {
                __mf_object_tree_t **all_ovr_objs;
                unsigned n;
                DECLARE (void *, malloc, size_t c);
                DECLARE (void, free, void *p);

                all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
                                                   obj_count));
                if (all_ovr_objs == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
                assert (n == obj_count);

                /* Confirm that accessed range is covered by first/last object. */
                if (LIKELY ((ptr_low >= all_ovr_objs[0]->data.low) &&
                            (ptr_high <= all_ovr_objs[obj_count-1]->data.high)))
                  {
                    /* Presume valid access.  */
                    judgement = 1;

                    /* Confirm that intermediate objects are
                       contiguous and share a single name.  Thus they
                       are likely split up GUESS regions, or mmap
                       pages.  The idea of the name check is to
                       prevent an oversize access to a
                       stack-registered object (followed by some GUESS
                       type) from being accepted as a hit.  */
                    for (n=0; n<obj_count-1; n++)
                      {
                        __mf_object_t *obj = & (all_ovr_objs[n]->data);
                        __mf_object_t *nextobj = & (all_ovr_objs[n+1]->data);

                        if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                          judgement = -1; /* Force error.  */

                        if (UNLIKELY (judgement == 1 &&
                                      (obj->high + 1 != nextobj->low)))
                          judgement = 0; /* Cancel presumption.  */

                        if (UNLIKELY (judgement == 1 &&
                                      (obj->name != nextobj->name)))
                          judgement = 0; /* Cancel presumption.  */
                        /* NB: strcmp above is not necessary since the
                           same literal string pointer is normally
                           used when creating regions.  */

                        /* XXX: hope for no overflow!  */
                        if (type == __MF_CHECK_READ)
                          obj->read_count ++;
                        else
                          obj->write_count ++;
                        obj->liveness ++;
                      }
                  }

                /* If the access is otherwise successful, check whether
                   any of the covered objects are being watched.  */
                if (judgement > 0)
                  {
                    unsigned i;
                    for (i=0; i<obj_count; i++)
                      if (all_ovr_objs[i]->data.watching_p)
                        judgement = -2; /* trigger VIOL_WATCH */
                  }

                /* Check for uninitialized reads.  */
                if (judgement > 0 &&
                    __mf_opts.check_initialization &&
                    type == __MF_CHECK_READ)
                  {
                    unsigned i;
                    unsigned written_count = 0;

                    for (i=0; i<obj_count; i++)
                      {
                        __mf_object_t *obj = & all_ovr_objs[i]->data;

                        if (obj->write_count
                            || obj->type == __MF_TYPE_HEAP_I
                            || obj->type == __MF_TYPE_GUESS)
                          written_count ++;
                      }

                    /* Check for ALL pieces having been written-to.
                       XXX: should this be ANY instead?  */
                    if (written_count != obj_count)
                      judgement = -1;
                  }

                /* Fill out the cache with the bounds of the first
                   object and the last object that covers this
                   cache line (== includes the same __MF_CACHE_INDEX).
                   This could let this cache line span *two* distinct
                   registered objects: a peculiar but reasonable
                   situation.  The cache line may not include the
                   entire object though.  */
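                /* Example (illustrative): for contiguous objects
                   A=[0x1000,0x13ff] and B=[0x1400,0x17ff] and an
                   access spanning both, entry->low becomes A.low,
                   while entry->high is taken from the last object
                   whose high end still maps to this same cache slot;
                   a later in-bounds access to either object through
                   this slot can then hit the cache directly.  */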
                if (judgement > 0)
                  {
                    unsigned i;
                    entry->low = all_ovr_objs[0]->data.low;
                    for (i=0; i<obj_count; i++)
                      {
                        uintptr_t high = all_ovr_objs[i]->data.high;
                        if (__MF_CACHE_INDEX (high) == entry_idx)
                          entry->high = high;
                      }
                  }

                CALL_REAL (free, all_ovr_objs);
              }

            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}

static __mf_object_tree_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_tree_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_tree_t));
  new_obj->data.low = low;
  new_obj->data.high = high;
  new_obj->data.type = type;
  new_obj->data.name = name;
  new_obj->data.alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  gettimeofday (& new_obj->data.alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->data.alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->data.alloc_backtrace_size =
      __mf_backtrace (& new_obj->data.alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}

static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      unsigned idx_low = __MF_CACHE_INDEX (low);
      unsigned idx_high = __MF_CACHE_INDEX (high);
      unsigned i;
      for (i = idx_low; i <= idx_high; i++)
        {
          struct __mf_cache *entry = & __mf_lookup_cache [i];
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}

void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_tree_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        num_overlapping_objs = __mf_find_objects (low, high, ovr_objs, 1);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_tree_t *ovr_obj = ovr_objs[0];

            /* Quietly accept a single duplicate registration for
               static objects, since these may come from distinct
               compilation units.  */
            if (type == __MF_TYPE_STATIC
                && ovr_obj->data.type == __MF_TYPE_STATIC
                && ovr_obj->data.low == low
                && ovr_obj->data.high == high)
              {
                /* do nothing */
                VERBOSE_TRACE ("duplicate static reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->data.name ? ovr_obj->data.name : ""));
                break;
              }

            /* Quietly accept a single duplicate registration for
               guess objects too.  */
            if (type == __MF_TYPE_GUESS &&
                ovr_obj->data.type == __MF_TYPE_GUESS &&
                ovr_obj->data.low == low &&
                ovr_obj->data.high == high)
              {
                /* do nothing */
                VERBOSE_TRACE ("duplicate guess reg %p-%p\n",
                               (void *) low, (void *) high);
                break;
              }

            /* Quietly accept a new guess registration that overlaps
               at least one existing object.  Trim it down to size.  */
            else if (type == __MF_TYPE_GUESS)
              {
                /* We need to split this new GUESS region into some
                   smaller ones.  Or we may not need to insert it at
                   all if it is covered by the overlapping region.  */

                /* First, identify all the overlapping objects.  */
                __mf_object_tree_t **all_ovr_objs;
                unsigned num_ovr_objs, n;
                uintptr_t next_low;
                DECLARE (void *, malloc, size_t c);
                DECLARE (void, free, void *p);

                all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
                                                   num_overlapping_objs));
                if (all_ovr_objs == NULL) abort ();
                num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
                                                  num_overlapping_objs);
                assert (num_ovr_objs == num_overlapping_objs);

                VERBOSE_TRACE ("splitting guess %p-%p, # overlaps: %u\n",
                               (void *) low, (void *) high, num_ovr_objs);

                /* Add GUESS regions between the holes: before each
                   overlapping region.  */

                next_low = low;
                /* This makes use of the assumption that __mf_find_objects() returns
                   overlapping objects in an increasing sequence.  */
                for (n=0; n < min (num_ovr_objs, num_overlapping_objs); n++)
                  {
                    if (all_ovr_objs[n]->data.low > next_low) /* Gap? */
                      {
                        uintptr_t next_high = CLAMPSUB (all_ovr_objs[n]->data.low, 1);
                        __mfu_register ((void *) next_low, next_high-next_low+1,
                                        __MF_TYPE_GUESS, name);
                      }
                    next_low = CLAMPADD (all_ovr_objs[n]->data.high, 1);
                  }
                /* Add in any leftover room at the top.  */
                if (next_low <= high)
                  __mfu_register ((void *) next_low, high-next_low+1,
                                  __MF_TYPE_GUESS, name);

                /* XXX: future optimization: allow consecutive GUESS regions to
                   be glued together.  */
                CALL_REAL (free, all_ovr_objs);
                return;
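
                /* Worked example (illustrative): registering a GUESS
                   region [0x1000,0x1fff] over an existing object
                   [0x1400,0x17ff] recursively registers only the gaps:
                   [0x1000,0x13ff] below the overlap and [0x1800,0x1fff]
                   above it.  The covered middle is simply dropped.  */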
              }

            /* Quietly accept a non-GUESS region overlaying a GUESS
               region.  Handle it by removing the GUESS region
               temporarily, then recursively adding this new object,
               and then the GUESS back.  The latter will be split up
               by the recursive process above.  */
            else if (ovr_obj->data.type == __MF_TYPE_GUESS)
              {
                uintptr_t old_low = ovr_obj->data.low;
                uintptr_t old_high = ovr_obj->data.high;
                const char* old_name = ovr_obj->data.name;

                /* Now to recursively remove the guess piece, and
                   reinsert them in the opposite order.  Recursion
                   should bottom out if another non-GUESS overlapping
                   region is found for this new object (resulting in a
                   violation), or if no further overlap occurs.  The
                   located GUESS region should end up being split up
                   in any case.  */
                __mfu_unregister ((void *) old_low, old_high-old_low+1);
                __mfu_register ((void *) low, sz, type, name);
                __mfu_register ((void *) old_low, old_high-old_low+1,
                                __MF_TYPE_GUESS, old_name);
                return;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here.  */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }

        /* No overlapping objects: AOK.  */
        else
          {
            __mf_insert_new_object (low, high, type, name, pc);
          }

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}

void
__mf_unregister (void *ptr, size_t sz)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_unregister (void *ptr, size_t sz)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu\n", ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_tree_t *old_obj = NULL;
        __mf_object_tree_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_tree_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        /* Treat unknown size indication as 1.  */
        if (sz == 0) sz = 1;

        num_overlapping_objs = __mf_find_objects ((uintptr_t) ptr,
                                                  CLAMPSZ (ptr, sz), objs, 1);

        /* XXX: handle unregistration of big old GUESS region, that has since
           been splintered.  */
        old_obj = objs[0];

        if (UNLIKELY (num_overlapping_objs != 1 ||
                      (uintptr_t)ptr != old_obj->data.low)) /* XXX: what about sz? */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (& old_obj->data);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->data.type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->data.type == __MF_TYPE_HEAP
                                        || old_obj->data.type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->data.low,
                    0,
                    (size_t) (old_obj->data.high - old_obj->data.low + 1));
          }

        /* Manage the object cemetery.  */
        if (__mf_opts.persistent_count > 0 &&
            old_obj->data.type >= 0 &&
            old_obj->data.type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->data.deallocated_p = 1;
            old_obj->left = old_obj->right = NULL;
            old_obj->data.dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            gettimeofday (& old_obj->data.dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->data.dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->data.type == __MF_TYPE_HEAP)
              old_obj->data.dealloc_backtrace_size =
                __mf_backtrace (& old_obj->data.dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->data.description_epoch --;

            /* Put this object into the cemetery.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->data.type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->data.read_count + old_obj->data.write_count) == 0 &&
                (old_obj->data.type == __MF_TYPE_HEAP
                 || old_obj->data.type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (& old_obj->data);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->data.alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->data.dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}

/* ------------------------------------------------------------------------ */
/* __mf_validate_live_object_tree, _object_cemetary */

static void
__mf_validate_live_object_tree (__mf_object_tree_t *obj)
{
  assert (obj != NULL);

  if (__mf_opts.persistent_count > 0)
    assert (! obj->data.deallocated_p);

  if (obj->left)
    {
      assert (obj->left->data.high < obj->data.low);
      __mf_validate_live_object_tree (obj->left);
    }
  if (obj->right)
    {
      assert (obj->right->data.low > obj->data.high);
      __mf_validate_live_object_tree (obj->right);
    }
}

static void
__mf_validate_object_cemetary ()
{
  unsigned cls;
  unsigned i;

  for (cls = 0; cls <= __MF_TYPE_MAX_CEM; cls++)
    {
      assert (__mf_object_dead_head [cls] >= 0 &&
              __mf_object_dead_head [cls] < __mf_opts.persistent_count);

      for (i = 0; i < __mf_opts.persistent_count; i++)
        {
          __mf_object_tree_t *obj = __mf_object_cemetary [cls][i];
          if (obj != NULL)
            {
              assert (obj->data.deallocated_p);
              assert (obj->left == NULL);
              assert (obj->right == NULL);
            }
        }
    }
}

static void
__mf_validate_objects ()
{
  if (__mf_object_root)
    __mf_validate_live_object_tree (__mf_object_root);

  if (__mf_opts.persistent_count > 0)
    __mf_validate_object_cemetary ();
}

static void
__mf_age_tree (__mf_object_tree_t *obj)
{
  assert (obj != NULL);
  obj->data.liveness = obj->data.liveness >> 1;

  if (obj->left)
    __mf_age_tree (obj->left);
  if (obj->right)
    __mf_age_tree (obj->right);
}
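
/* Illustrative arithmetic: each aging pass halves every liveness
   counter, so recent activity decays exponentially.  An object last
   checked with liveness 32 falls to 16, 8, 4, 2, 1, 0 over six
   passes, after which it no longer influences tree rotations or
   cache adaptation.  */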

struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static void
__mf_tree_analyze (__mf_object_tree_t *obj, struct tree_stats* s)
{
  assert (obj != NULL);

  if (obj->left)
    __mf_tree_analyze (obj->left, s);

  /* Exclude never-accessed objects.  */
  if (obj->data.read_count + obj->data.write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->data.high - obj->data.low + 1);

      if (obj->data.liveness)
        {
          unsigned i;
          uintptr_t addr;

          VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
                         (void *) obj->data.low, obj->data.liveness, obj->data.name);

          s->live_obj_count ++;
          s->total_weight += (double) obj->data.liveness;
          s->weighted_size +=
            (double) (obj->data.high - obj->data.low + 1) *
            (double) obj->data.liveness;

          addr = obj->data.low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->data.liveness;
              addr = addr >> 1;
            }
        }
    }

  if (obj->right)
    __mf_tree_analyze (obj->right, s);
}

static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));
  if (__mf_object_root)
    __mf_tree_analyze (__mf_object_root, & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7; /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping.  */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);
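
  /* Worked example (illustrative): starting from the default shift of
     2, if the shoulder scan above settles on bit i == 10, the smoothed
     value moves 0.9*2.0 + 0.1*10 = 2.8 on this adaptation and 3.52 on
     the next, creeping toward 10 instead of jumping there; new_shift
     is that value rounded to the nearest integer.  */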

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0x3ff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}

/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

/* XXX: track traversal statistics, like average depth, balance.  */
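
/* Illustrative calling pattern (as used by __mfu_check above): probe
   with a one-element array first, and re-query with a right-sized
   array only when the returned count says more objects overlap:

     __mf_object_tree_t *one[1];
     unsigned count = __mf_find_objects (low, high, one, 1);
     if (count > 1)
       {
         // allocate `count' pointers with CALL_REAL(malloc, ...) and
         // call __mf_find_objects again to collect all of them
       }
*/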

static unsigned
__mf_find_objects_rec (uintptr_t low, uintptr_t high, __mf_object_tree_t **nodep,
                       __mf_object_tree_t **objs, unsigned max_objs)
{
  unsigned count;
  __mf_object_tree_t *node = *nodep;

  assert (low <= high);
  assert (max_objs == 0 || objs != NULL);

  if (UNLIKELY (node == NULL)) return 0;

  /* Traverse down left subtree.  */
  count = 0;
  if (low < node->data.low)
    count += __mf_find_objects_rec (low, min(high, node->data.low),
                                    & node->left, objs, max_objs);

  /* Track the used slots of objs[].  */
  if (count < max_objs)
    {
      objs += count;
      max_objs -= count;
    }
  else
    {
      max_objs = 0;
    }

  /* Check for overlap with this node.  */
  if (high >= node->data.low && low <= node->data.high)
    {
      count ++;
      if (max_objs > 0) /* Any room left?  */
        {
          objs[0] = node;
          objs ++;
          max_objs --;
        }
    }

  /* Traverse down right subtree.  */
  if (high > node->data.high)
    count += __mf_find_objects_rec (max (low, node->data.high), high,
                                    & node->right, objs, max_objs);
  /* There is no need to manipulate objs/max_objs any further.  */


  /* Rotate a child node up if its access count is higher.  */
  if (UNLIKELY ((node->left && node->left->data.liveness > node->data.liveness) &&
                ((!node->right || (node->right &&
                                   node->left->data.liveness >
                                   node->right->data.liveness)))))
    {
      __mf_object_tree_t *l = node->left;
      __mf_object_tree_t *l_r = l->right;

      *nodep = l;
      l->right = node;
      node->left = l_r;
      __mf_treerot_left ++;
    }
  else if (UNLIKELY ((node->right && node->right->data.liveness > node->data.liveness) &&
                     ((!node->left || (node->left &&
                                       node->right->data.liveness >
                                       node->left->data.liveness)))))
    {
      __mf_object_tree_t *r = node->right;
      __mf_object_tree_t *r_l = r->left;

      *nodep = r;
      r->left = node;
      node->right = r_l;
      __mf_treerot_right ++;
    }

  return count;
}

unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_tree_t **objs, unsigned max_objs)
{
  if (UNLIKELY(__mf_opts.internal_checking))
    __mf_validate_objects ();

  return __mf_find_objects_rec (ptr_low, ptr_high, & __mf_object_root, objs, max_objs);
}

/* __mf_link_object */

static void
__mf_link_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
{
  __mf_object_tree_t *node = *link;

  assert (ptr != NULL);
  if (UNLIKELY(node == NULL))
    {
      *link = ptr;
      return;
    }

  if (ptr->data.high < node->data.low)
    return __mf_link_object2 (ptr, & node->left);
  else if (ptr->data.low > node->data.high)
    return __mf_link_object2 (ptr, & node->right);
  else
    abort (); /* XXX: duplicate object */
}

void
__mf_link_object (__mf_object_tree_t *ptr)
{
  if (UNLIKELY(__mf_opts.internal_checking))
    __mf_validate_objects ();

  return __mf_link_object2 (ptr, & __mf_object_root);
}

/* __mf_unlink_object */

static void
__mf_unlink_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
{
  __mf_object_tree_t *node = *link;

  assert (ptr != NULL);
  if (UNLIKELY(node == ptr))
    {
      static unsigned promote_left_p = 0;
      promote_left_p = 1 - promote_left_p;

      /* Alternate promoting the left & right subtrees.  */
      if (promote_left_p)
        {
          *link = ptr->left;
          if (ptr->right != NULL)
            __mf_link_object2 (ptr->right, link);
        }
      else
        {
          *link = ptr->right;
          if (ptr->left != NULL)
            __mf_link_object2 (ptr->left, link);
        }

      return;
    }

  if (ptr->data.high < node->data.low)
    return __mf_unlink_object2 (ptr, & node->left);
  else if (ptr->data.low > node->data.high)
    return __mf_unlink_object2 (ptr, & node->right);
  else
    abort (); /* XXX: missing object; should fail more gracefully.  */
}

static void
__mf_unlink_object (__mf_object_tree_t *node)
{
  __mf_unlink_object2 (node, & __mf_object_root);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_tree_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_tree_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->data.low <= high && obj->data.high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
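
          /* The widening sequence is 0, 1, 3, 7, 15, ..., so the scan
             stays logarithmic in persistent_count: with the default
             persistent-count of 100 the loop runs at most 7 times
             before giving up.  */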
        }

      return count;
    }
  else
    {
      return 0;
    }
}
1821 static void
1822 __mf_describe_object (__mf_object_t *obj)
1824 static unsigned epoch = 0;
1825 if (obj == NULL)
1827 epoch ++;
1828 return;
1831 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1833 fprintf (stderr,
1834 "mudflap object %p: name=`%s'\n",
1835 (void *) obj, (obj->name ? obj->name : ""));
1836 return;
1838 else
1839 obj->description_epoch = epoch;
1841 fprintf (stderr,
1842 "mudflap object %p: name=`%s'\n"
1843 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1844 "alloc time=%lu.%06lu pc=%p"
1845 #ifdef LIBMUDFLAPTH
1846 " thread=%u"
1847 #endif
1848 "\n",
1849 (void *) obj, (obj->name ? obj->name : ""),
1850 (void *) obj->low, (void *) obj->high,
1851 (unsigned long) (obj->high - obj->low + 1),
1852 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1853 obj->type == __MF_TYPE_HEAP ? "heap" :
1854 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1855 obj->type == __MF_TYPE_STACK ? "stack" :
1856 obj->type == __MF_TYPE_STATIC ? "static" :
1857 obj->type == __MF_TYPE_GUESS ? "guess" :
1858 "unknown"),
1859 obj->read_count, obj->write_count, obj->liveness,
1860 obj->watching_p ? " watching" : "",
1861 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1862 (void *) obj->alloc_pc
1863 #ifdef LIBMUDFLAPTH
1864 , (unsigned) obj->alloc_thread
1865 #endif
1868 if (__mf_opts.backtrace > 0)
1870 unsigned i;
1871 for (i=0; i<obj->alloc_backtrace_size; i++)
1872 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
1875 if (__mf_opts.persistent_count > 0)
1877 if (obj->deallocated_p)
1879 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1880 #ifdef LIBMUDFLAPTH
1881 " thread=%u"
1882 #endif
1883 "\n",
1884 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1885 (void *) obj->dealloc_pc
1886 #ifdef LIBMUDFLAPTH
1887 , (unsigned) obj->dealloc_thread
1888 #endif
1892 if (__mf_opts.backtrace > 0)
1894 unsigned i;
1895 for (i=0; i<obj->dealloc_backtrace_size; i++)
1896 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);

static unsigned
__mf_report_leaks (__mf_object_tree_t *node)
{
  /* The counter is shared amongst recursive calls, so
     that cumulative numbers are printed below.  */
  static unsigned count = 0;

  if (node == NULL) /* Reset */
    {
      count = 0;
      return 0;
    }

  /* Inorder traversal.  */
  if (node->left)
    __mf_report_leaks (node->left);
  if (node->data.type == __MF_TYPE_HEAP
      || node->data.type == __MF_TYPE_HEAP_I)
    {
      count ++;
      fprintf (stderr, "Leaked object %u:\n", count);
      __mf_describe_object (& node->data);
    }
  if (node->right)
    __mf_report_leaks (node->right);

  return count;
}

/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu rot: %lu/%lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check, __mf_treerot_left, __mf_treerot_right,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }

        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      __mf_report_leaks (NULL); /* Reset cumulative count.  */
      l = __mf_report_leaks (__mf_object_root);
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}

/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *));
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries.  */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
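
    /* Illustrative layout for remaining_size == 2 (perline == 30):

         [char* p0][char* p1][p0's 30-char slot][p1's 30-char slot]

       each pointers[i] is aimed at its fixed-width slot just before
       sprintf writes the "[0x...]" text into it.  */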
2092 void *buffer;
2093 char *chars;
2094 char **pointers;
2095 enum { perline = 30 };
2096 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
2097 pointers = (char **) buffer;
2098 chars = (char *)buffer + (remaining_size * sizeof (char *));
2099 for (i = 0; i < remaining_size; i++)
2101 pointers[i] = chars;
2102 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
2103 chars = chars + perline;
2105 *symbols = pointers;
2107 #endif
2108 CALL_REAL (free, pc_array);
2110 return remaining_size;
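/* Caller contract, sketched from the code above (and mirrored by
   __mf_violation below): *symbols arrives as one malloc()ed block and
   must be released with the wrapped free so the checker is not
   re-entered.  Illustration only:  */
#if 0
{
  char **symbols;
  size_t i, n = __mf_backtrace (& symbols, NULL, 2);
  for (i = 0; i < n; i++)
    fprintf (stderr, "  %s\n", symbols[i]);
  CALL_REAL (free, symbols);
}
#endif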
2113 /* ------------------------------------------------------------------------ */
2114 /* __mf_violation */
2116 void
2117 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
2118 const char *location, int type)
2120 char buf [128];
2121 static unsigned violation_number;
2122 DECLARE(void, free, void *ptr);
2124 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
2125 (void *) pc,
2126 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
2128 if (__mf_opts.collect_stats)
2129 __mf_count_violation [(type < 0) ? 0 :
2130 (type > __MF_VIOL_WATCH) ? 0 :
2131 type] ++;
2133 /* Print out a basic warning message. */
2134 if (__mf_opts.verbose_violations)
2136 unsigned dead_p;
2137 unsigned num_helpful = 0;
2138 struct timeval now = { 0, 0 }; /* Zeroed: printed below even without HAVE_GETTIMEOFDAY. */
2139 #if HAVE_GETTIMEOFDAY
2140 gettimeofday (& now, NULL);
2141 #endif
2143 violation_number ++;
2144 fprintf (stderr,
2145 "*******\n"
2146 "mudflap violation %u (%s): time=%lu.%06lu "
2147 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2148 violation_number,
2149 ((type == __MF_VIOL_READ) ? "check/read" :
2150 (type == __MF_VIOL_WRITE) ? "check/write" :
2151 (type == __MF_VIOL_REGISTER) ? "register" :
2152 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2153 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2154 now.tv_sec, now.tv_usec,
2155 (void *) ptr, (unsigned long)sz, (void *) pc,
2156 (location != NULL ? " location=`" : ""),
2157 (location != NULL ? location : ""),
2158 (location != NULL ? "'" : ""));
2160 if (__mf_opts.backtrace > 0)
2162 char ** symbols;
2163 unsigned i, num;
2165 num = __mf_backtrace (& symbols, (void *) pc, 2);
2166 /* Note: backtrace_symbols calls malloc(), but since we are inside
2167 __mf_violation (and presumably arrived via __mf_check), the
2168 recursion guard keeps its new strings out of the object database. */
2170 for (i=0; i<num; i++)
2171 fprintf (stderr, " %s\n", symbols[i]);
2173 /* Calling free() here would trigger a violation. */
2174 CALL_REAL(free, symbols);
2178 /* Look for nearby objects. For this, we start with s_low/s_high
2179 pointing to the given area, looking for overlapping objects.
2180 If none show up, widen the search area and keep looking. */
2182 if (sz == 0) sz = 1;
2184 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2186 enum {max_objs = 3}; /* magic */
2187 __mf_object_tree_t *objs[max_objs];
2188 unsigned num_objs = 0;
2189 uintptr_t s_low, s_high;
2190 unsigned tries = 0;
2191 unsigned i;
2193 s_low = (uintptr_t) ptr;
2194 s_high = CLAMPSZ (ptr, sz);
2196 while (tries < 16) /* magic */
2198 if (dead_p)
2199 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2200 else
2201 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2203 if (num_objs) /* good enough */
2204 break;
2206 tries ++;
2208 /* XXX: tune this search strategy. It is too dependent on
2209 sz, which can vary from 1 to very large values (e.g., when
2210 checking array indexes). */
2211 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2212 s_high = CLAMPADD (s_high, (sz * tries * tries));
2215 for (i = 0; i < min (num_objs, max_objs); i++)
2217 __mf_object_t *obj = & objs[i]->data;
2218 uintptr_t low = (uintptr_t) ptr;
2219 uintptr_t high = CLAMPSZ (ptr, sz);
2220 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2221 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2222 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2223 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2224 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2225 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2227 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2228 num_helpful + i + 1,
2229 (before1 ? before1 : after1 ? after1 : into1),
2230 (before1 ? "before" : after1 ? "after" : "into"),
2231 (before2 ? before2 : after2 ? after2 : into2),
2232 (before2 ? "before" : after2 ? "after" : "into"));
2233 __mf_describe_object (obj);
2235 num_helpful += num_objs;
2238 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
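/* Worked example of the "Nearby object" message above: for a checked
   region [0x1000,0x1003] and an object occupying [0x1004,0x100f],
   before1 = 4 and before2 = 1, so the line reads
   "checked region begins 4B before and ends 1B before".  */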
2241 /* How to finally handle this violation? */
2242 switch (__mf_opts.violation_mode)
2244 case viol_nop:
2245 break;
2246 case viol_segv:
2247 kill (getpid(), SIGSEGV);
2248 break;
2249 case viol_abort:
2250 abort ();
2251 break;
2252 case viol_gdb:
2254 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2255 system (buf);
2256 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2257 instead, and let the forked child execlp() gdb. That way, this
2258 subject process could be resumed under the supervision of gdb.
2259 That cannot happen now, since system() only returns when gdb
2260 exits. With a forked child we would need to beware of starting a
2261 second concurrent gdb upon the next violation; but if the first
2262 gdb has died, starting a new one is appropriate. (Sketch below.) */
2263 break;
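/* A minimal sketch of the fork()-based variant proposed in the XXX
   comment above.  GDB_WAIT_PARAMETER is the comment's own placeholder,
   not an existing knob; hence #if 0.  */
#if 0
{
  pid_t child = fork ();
  if (child == 0)
    {
      char pidbuf[32];
      snprintf (pidbuf, sizeof pidbuf, "--pid=%u", (unsigned) getppid ());
      execlp ("gdb", "gdb", pidbuf, (char *) NULL);
      _exit (127);                 /* Only reached if execlp failed. */
    }
  else if (child > 0)
    sleep (GDB_WAIT_PARAMETER);    /* Give gdb time to attach, then resume. */
}
#endif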
2267 /* ------------------------------------------------------------------------ */
2270 unsigned __mf_watch (void *ptr, size_t sz)
2272 unsigned rc;
2273 LOCKTH ();
2274 BEGIN_RECURSION_PROTECT ();
2275 rc = __mf_watch_or_not (ptr, sz, 1);
2276 END_RECURSION_PROTECT ();
2277 UNLOCKTH ();
2278 return rc;
2281 unsigned __mf_unwatch (void *ptr, size_t sz)
2283 unsigned rc;
2284 LOCKTH ();
BEGIN_RECURSION_PROTECT (); /* For symmetry with __mf_watch above. */
2285 rc = __mf_watch_or_not (ptr, sz, 0);
END_RECURSION_PROTECT ();
2286 UNLOCKTH ();
2287 return rc;
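/* Illustrative use of the watch API, assuming the public entry points
   declared in mf-runtime.h (__mf_register, __MF_TYPE_GUESS):  */
#if 0
{
  char buf[16];
  __mf_register (buf, sizeof buf, __MF_TYPE_GUESS, "demo buffer");
  __mf_watch (buf, sizeof buf);   /* Returns the number of objects flagged. */
  buf[0] = 'x';                   /* An instrumented access now raises a
                                     __MF_VIOL_WATCH violation. */
  __mf_unwatch (buf, sizeof buf);
}
#endif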
2291 static unsigned
2292 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2294 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2295 uintptr_t ptr_low = (uintptr_t) ptr;
2296 unsigned count = 0;
2298 TRACE ("%s ptr=%p size=%lu\n",
2299 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2301 switch (__mf_opts.mudflap_mode)
2303 case mode_nop:
2304 case mode_populate:
2305 case mode_violate:
2306 count = 0;
2307 break;
2309 case mode_check:
2311 __mf_object_tree_t **all_ovr_objs;
2312 unsigned obj_count;
2313 unsigned n;
2314 DECLARE (void *, malloc, size_t c);
2315 DECLARE (void, free, void *p);
2317 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2318 VERBOSE_TRACE (" %u:", obj_count);
2320 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
2321 obj_count));
2322 if (all_ovr_objs == NULL) abort ();
2323 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2324 assert (n == obj_count);
2326 for (n = 0; n < obj_count; n ++)
2328 __mf_object_t *obj = & (all_ovr_objs[n]->data);
2330 VERBOSE_TRACE (" [%p]", (void *) obj);
2331 if (obj->watching_p != flag)
2333 obj->watching_p = flag;
2334 count ++;
2336 /* Remove object from cache, to ensure next access
2337 goes through __mf_check(). */
2338 if (flag)
2339 __mf_uncache_object (obj);
2342 CALL_REAL (free, all_ovr_objs);
2344 break;
2347 return count;
2351 void
2352 __mf_sigusr1_handler (int num)
2354 __mf_sigusr1_received ++;
2357 /* Install or remove SIGUSR1 handler as necessary.
2358 Also, respond to a received pending SIGUSR1. */
2359 void
2360 __mf_sigusr1_respond ()
2362 static int handler_installed;
2364 #ifdef SIGUSR1
2365 /* Manage handler */
2366 if (__mf_opts.sigusr1_report && ! handler_installed)
2368 signal (SIGUSR1, __mf_sigusr1_handler);
2369 handler_installed = 1;
2371 else if (! __mf_opts.sigusr1_report && handler_installed)
2373 signal (SIGUSR1, SIG_DFL);
2374 handler_installed = 0;
2376 #endif
2378 /* Manage enqueued signals */
2379 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2381 __mf_sigusr1_handled ++;
2382 assert (__mf_state == reentrant);
2383 __mfu_report ();
2384 handler_installed = 0; /* Force re-installation next time: SysV-style signal() resets the handler to SIG_DFL after delivery. */
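/* Usage sketch: with sigusr1_report enabled, a report can be requested
   asynchronously; the handler above only sets a flag, and the report
   is emitted at the next libmudflap entry point that calls
   __mf_sigusr1_respond.  */
#if 0
raise (SIGUSR1);   /* Or, from a shell: kill -USR1 <pid>. */
#endif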
2389 /* XXX: provide an alternative __assert_fail function that cannot
2390 fail due to libmudflap infinite recursion. */
2391 #ifndef NDEBUG
2393 static void
2394 write_itoa (int fd, unsigned n)
2396 enum x { bufsize = sizeof(n)*4 }; /* Ample room for the decimal digits of n plus a NUL. */
2397 char buf [bufsize];
2398 unsigned i;
2400 for (i=0; i<bufsize-1; i++)
2402 unsigned digit = n % 10;
2403 buf[bufsize-2-i] = digit + '0';
2404 n /= 10;
2405 if (n == 0)
2407 char *m = & buf [bufsize-2-i];
2408 buf[bufsize-1] = '\0';
2409 write (fd, m, strlen(m));
2410 break;
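/* Worked example: write_itoa (2, 407) fills the tail of buf[] with
   "407\0" and emits the three bytes "407" using only raw write(),
   avoiding the printf family, which may allocate and so re-enter
   libmudflap.  */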
2416 void
2417 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2419 #define write2(string) write (2, (string), strlen ((string)))
2420 write2("mf");
2421 #ifdef LIBMUDFLAPTH
2422 write2("(");
2423 write_itoa (2, (unsigned) pthread_self ());
2424 write2(")");
2425 #endif
2426 write2(": assertion failure: `");
2427 write (2, msg, strlen (msg));
2428 write2("' in ");
2429 write (2, func, strlen (func));
2430 write2(" at ");
2431 write (2, file, strlen (file));
2432 write2(":");
2433 write_itoa (2, line);
2434 write2("\n");
2435 #undef write2
2436 abort ();
2440 #endif