1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19   the file, and distribution when not linked into a combined
20 executable.)
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 for more details.
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
30 02111-1307, USA. */
32 #include "config.h"
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
37 #define _POSIX_SOURCE
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
39 #define _GNU_SOURCE
40 #define _XOPEN_SOURCE
41 #define _BSD_TYPES
42 #define __EXTENSIONS__
43 #define _ALL_SOURCE
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <sys/types.h>
50 #include <sys/time.h>
51 #include <time.h>
52 #include <unistd.h>
53 #ifdef HAVE_EXECINFO_H
54 #include <execinfo.h>
55 #endif
56 #ifdef HAVE_SIGNAL_H
57 #include <signal.h>
58 #endif
59 #include <assert.h>
61 #include <string.h>
62 #include <limits.h>
63 #include <sys/types.h>
64 #include <signal.h>
65 #include <errno.h>
67 #include "mf-runtime.h"
68 #include "mf-impl.h"
71 /* ------------------------------------------------------------------------ */
72 /* Utility macros */
74 #define CTOR __attribute__ ((constructor))
75 #define DTOR __attribute__ ((destructor))
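/* CTOR/DTOR tag functions to run automatically before main() and at
   normal exit, respectively; __mf_init and __mf_fini below use them,
   so the runtime is configured before any user code executes. */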
78 /* Codes to describe the context in which a violation occurs. */
79 #define __MF_VIOL_UNKNOWN 0
80 #define __MF_VIOL_READ 1
81 #define __MF_VIOL_WRITE 2
82 #define __MF_VIOL_REGISTER 3
83 #define __MF_VIOL_UNREGISTER 4
84 #define __MF_VIOL_WATCH 5
86 /* Protect against recursive calls. */
87 #define BEGIN_RECURSION_PROTECT() do { \
88 if (UNLIKELY (__mf_state == reentrant)) { \
89 write (2, "mf: erroneous reentrancy detected in `", 38); \
90 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
91 write (2, "'\n", 2); \
92 abort (); } \
93 __mf_state = reentrant; \
94 } while (0)
96 #define END_RECURSION_PROTECT() do { \
97 __mf_state = active; \
98 } while (0)
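/* A minimal sketch of the intended bracketing, as used by the public
   entry points below; __mfu_foo stands in for a hypothetical unlocked
   worker function:

     void __mf_foo (void *ptr)
     {
       LOCKTH ();
       BEGIN_RECURSION_PROTECT ();
       __mfu_foo (ptr);
       END_RECURSION_PROTECT ();
       UNLOCKTH ();
     }

   Reentering the runtime while __mf_state == reentrant aborts. */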
102 /* ------------------------------------------------------------------------ */
103 /* Required globals. */
105 #define LOOKUP_CACHE_MASK_DFL 1023
106 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
107 #define LOOKUP_CACHE_SHIFT_DFL 2
109 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
110 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
111 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
112 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
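/* A sketch of the fast path this cache enables, assuming the
   shift-then-mask __MF_CACHE_INDEX() definition from mf-impl.h:

     uintptr_t low = (uintptr_t) ptr;
     uintptr_t high = CLAMPSZ (ptr, sz);
     struct __mf_cache *e = & __mf_lookup_cache [__MF_CACHE_INDEX (low)];
     if (e->low <= low && high <= e->high)
       ... cache hit: a known-good range, no object tree search ...

   Compiler-instrumented code performs this test inline and only calls
   __mf_check on a miss. */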
114 struct __mf_options __mf_opts;
116 int __mf_starting_p = 1;
117 #ifndef LIBMUDFLAPTH
118 enum __mf_state_enum __mf_state = active;
119 #else
120 /* See __mf_state_perthread() in mf-hooks.c. */
121 #endif
124 #ifdef LIBMUDFLAPTH
125 pthread_mutex_t __mf_biglock =
126 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
127 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
128 #else
129 PTHREAD_MUTEX_INITIALIZER;
130 #endif
131 #endif
133 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
134    the plain libmudflap.la (no threading support) can diagnose whether
135 the application is linked with -lpthread. See __mf_usage() below. */
136 #if HAVE_PTHREAD_H
137 #pragma weak pthread_join
138 const void *threads_active_p = (void *) pthread_join;
139 #endif
142 /* ------------------------------------------------------------------------ */
143 /* stats-related globals. */
145 static unsigned long __mf_count_check;
146 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
147 static unsigned long __mf_treerot_left, __mf_treerot_right;
148 static unsigned long __mf_count_register;
149 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
150 static unsigned long __mf_count_unregister;
151 static unsigned long __mf_total_unregister_size;
152 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
153 static unsigned long __mf_sigusr1_received;
154 static unsigned long __mf_sigusr1_handled;
155 /* not static */ unsigned long __mf_reentrancy;
156 #ifdef LIBMUDFLAPTH
157 /* not static */ unsigned long __mf_lock_contention;
158 #endif
161 /* ------------------------------------------------------------------------ */
162 /* mode-check-related globals. */
164 typedef struct __mf_object
166 uintptr_t low, high; /* __mf_register parameters */
167 const char *name;
168 char type; /* __MF_TYPE_something */
169 char watching_p; /* Trigger a VIOL_WATCH on access? */
170 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
171 unsigned write_count; /* Likewise for __mf_check/write. */
172 unsigned liveness; /* A measure of recent checking activity. */
173 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
175 uintptr_t alloc_pc;
176 struct timeval alloc_time;
177 char **alloc_backtrace;
178 size_t alloc_backtrace_size;
179 #ifdef LIBMUDFLAPTH
180 pthread_t alloc_thread;
181 #endif
183 int deallocated_p;
184 uintptr_t dealloc_pc;
185 struct timeval dealloc_time;
186 char **dealloc_backtrace;
187 size_t dealloc_backtrace_size;
188 #ifdef LIBMUDFLAPTH
189 pthread_t dealloc_thread;
190 #endif
191 } __mf_object_t;
194 typedef struct __mf_object_tree
196 __mf_object_t data;
197 struct __mf_object_tree *left;
198 struct __mf_object_tree *right;
199 } __mf_object_tree_t;
201 /* Live objects: binary tree on __mf_object_t.low */
202 static __mf_object_tree_t *__mf_object_root;
204 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
205 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
206 static __mf_object_tree_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
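/* For example, with the default persistent_count of 100, each row of
   the cemetery retains the 100 most recently unregistered objects of
   that type; __mf_object_dead_head[row] names the plot that the next
   burial will reuse (see __mfu_unregister). */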
209 /* ------------------------------------------------------------------------ */
210 /* Forward function declarations */
212 static void __mf_init () CTOR;
213 static void __mf_sigusr1_respond ();
214 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
215 __mf_object_tree_t **objs, unsigned max_objs);
216 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
217 __mf_object_tree_t **objs, unsigned max_objs);
218 static void __mf_link_object (__mf_object_tree_t *obj);
219 static void __mf_age_tree (__mf_object_tree_t *obj);
220 static void __mf_adapt_cache ();
221 static void __mf_unlink_object (__mf_object_tree_t *obj);
222 static void __mf_describe_object (__mf_object_t *obj);
223 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
227 /* ------------------------------------------------------------------------ */
228 /* Configuration engine */
230 static void
231 __mf_set_default_options ()
233 memset (& __mf_opts, 0, sizeof (__mf_opts));
235 __mf_opts.tree_aging = 13037;
236 __mf_opts.adapt_cache = 1000003;
237 __mf_opts.abbreviate = 1;
238 __mf_opts.verbose_violations = 1;
239 __mf_opts.free_queue_length = 4;
240 __mf_opts.persistent_count = 100;
241 __mf_opts.crumple_zone = 32;
242 __mf_opts.backtrace = 4;
243 __mf_opts.mudflap_mode = mode_check;
244 __mf_opts.violation_mode = viol_nop;
245 __mf_opts.heur_std_data = 1;
246 #ifdef LIBMUDFLAPTH
247 __mf_opts.thread_stack = 0;
248 #endif
251 static struct option
253 char *name;
254 char *description;
255 enum
257 set_option,
258 read_integer_option,
259 } type;
260 int value;
261 int *target;
263 options [] =
265 {"mode-nop",
266 "mudflaps do nothing",
267 set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
268 {"mode-populate",
269 "mudflaps populate object tree",
270 set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
271 {"mode-check",
272 "mudflaps check for memory violations",
273 set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
274 {"mode-violate",
275 "mudflaps always cause violations (diagnostic)",
276 set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
278 {"viol-nop",
279 "violations do not change program execution",
280 set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
281 {"viol-abort",
282 "violations cause a call to abort()",
283 set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
284 {"viol-segv",
285 "violations are promoted to SIGSEGV signals",
286 set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
287 {"viol-gdb",
288 "violations fork a gdb process attached to current program",
289 set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
290 {"trace-calls",
291 "trace calls to mudflap runtime library",
292 set_option, 1, &__mf_opts.trace_mf_calls},
293 {"verbose-trace",
294 "trace internal events within mudflap runtime library",
295 set_option, 1, &__mf_opts.verbose_trace},
296 {"collect-stats",
297 "collect statistics on mudflap's operation",
298 set_option, 1, &__mf_opts.collect_stats},
299 #if HAVE_SIGNAL
300 {"sigusr1-report",
301 "print report upon SIGUSR1",
302 set_option, 1, &__mf_opts.sigusr1_report},
303 #endif
304 {"internal-checking",
305 "perform more expensive internal checking",
306 set_option, 1, &__mf_opts.internal_checking},
307 {"age-tree",
308 "age the object tree after N accesses for working set",
309 read_integer_option, 1000000, &__mf_opts.tree_aging},
310 {"print-leaks",
311 "print any memory leaks at program shutdown",
312 set_option, 1, &__mf_opts.print_leaks},
313 {"check-initialization",
314 "detect uninitialized object reads",
315 set_option, 1, &__mf_opts.check_initialization},
316 {"verbose-violations",
317 "print verbose messages when memory violations occur",
318 set_option, 1, &__mf_opts.verbose_violations},
319 {"abbreviate",
320 "abbreviate repetitive listings",
321 set_option, 1, &__mf_opts.abbreviate},
322 {"wipe-stack",
323 "wipe stack objects at unwind",
324 set_option, 1, &__mf_opts.wipe_stack},
325 {"wipe-heap",
326 "wipe heap objects at free",
327 set_option, 1, &__mf_opts.wipe_heap},
328 {"heur-proc-map",
329 "support /proc/self/map heuristics",
330 set_option, 1, &__mf_opts.heur_proc_map},
331 {"heur-stack-bound",
332 "enable a simple upper stack bound heuristic",
333 set_option, 1, &__mf_opts.heur_stack_bound},
334 {"heur-start-end",
335 "support _start.._end heuristics",
336 set_option, 1, &__mf_opts.heur_start_end},
337 {"heur-stdlib",
338 "register standard library data (argv, errno, stdin, ...)",
339 set_option, 1, &__mf_opts.heur_std_data},
340 {"free-queue-length",
341 "queue N deferred free() calls before performing them",
342 read_integer_option, 0, &__mf_opts.free_queue_length},
343 {"persistent-count",
344 "keep a history of N unregistered regions",
345 read_integer_option, 0, &__mf_opts.persistent_count},
346 {"crumple-zone",
347 "surround allocations with crumple zones of N bytes",
348 read_integer_option, 0, &__mf_opts.crumple_zone},
349 /* XXX: not type-safe.
350 {"lc-mask",
351 "set lookup cache size mask to N (2**M - 1)",
352 read_integer_option, 0, (int *)(&__mf_lc_mask)},
353 {"lc-shift",
354 "set lookup cache pointer shift",
355     read_integer_option, 0, (int *)(&__mf_lc_shift)}, */
357 {"lc-adapt",
358 "adapt mask/shift parameters after N cache misses",
359 read_integer_option, 1, &__mf_opts.adapt_cache},
360 {"backtrace",
361 "keep an N-level stack trace of each call context",
362 read_integer_option, 0, &__mf_opts.backtrace},
363 #ifdef LIBMUDFLAPTH
364 {"thread-stack",
365 "override thread stacks allocation: N kB",
366 read_integer_option, 0, &__mf_opts.thread_stack},
367 #endif
368 {0, 0, set_option, 0, NULL}
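/* For example, per the table above and the parser in __mfu_set_options,
   MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-abbreviate'
   selects checking mode and abort-on-violation, keeps an 8-level
   backtrace, and disables abbreviated listings. */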
371 static void
372 __mf_usage ()
374 struct option *opt;
376 fprintf (stderr,
377 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
378 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
379 "\n"
380 "The mudflap code can be controlled by an environment variable:\n"
381 "\n"
382 "$ export MUDFLAP_OPTIONS='<options>'\n"
383 "$ <mudflapped_program>\n"
384 "\n"
385 "where <options> is a space-separated list of \n"
386 "any of the following options. Use `-no-OPTION' to disable options.\n"
387 "\n",
388 #if HAVE_PTHREAD_H
389 (threads_active_p ? "multi-threaded " : "single-threaded "),
390 #else
392 #endif
393 #if LIBMUDFLAPTH
394 "thread-aware "
395 #else
396 "thread-unaware "
397 #endif
399 /* XXX: The multi-threaded thread-unaware combination is bad. */
401 for (opt = options; opt->name; opt++)
403 int default_p = (opt->value == * opt->target);
405 switch (opt->type)
407 char buf[128];
408 case set_option:
409 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
410 if (default_p)
411 fprintf (stderr, " [active]\n");
412 else
413 fprintf (stderr, "\n");
414 break;
415 case read_integer_option:
416 strncpy (buf, opt->name, 128);
417 strncpy (buf + strlen (opt->name), "=N", 2);
418 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
419 fprintf (stderr, " [%d]\n", * opt->target);
420 break;
421 default: abort();
425 fprintf (stderr, "\n");
429 int
430 __mf_set_options (const char *optstr)
432 int rc;
433 LOCKTH ();
434 BEGIN_RECURSION_PROTECT ();
435 rc = __mfu_set_options (optstr);
436 /* XXX: It's not really that easy. A change to a bunch of parameters
437 can require updating auxiliary state or risk crashing:
438 free_queue_length, crumple_zone ... */
439 END_RECURSION_PROTECT ();
440 UNLOCKTH ();
441 return rc;
445 int
446 __mfu_set_options (const char *optstr)
448 struct option *opts = 0;
449 char *nxt = 0;
450 long tmp = 0;
451 int rc = 0;
452 const char *saved_optstr = optstr;
454 /* XXX: bounds-check for optstr! */
456 while (*optstr)
458 switch (*optstr) {
459 case ' ':
460 case '\t':
461 case '\n':
462 optstr++;
463 break;
465 case '-':
466       if (*(optstr+1))
468 int negate = 0;
469 optstr++;
471 if (*optstr == '?' ||
472 strncmp (optstr, "help", 4) == 0)
474 /* Caller will print help and exit. */
475 return -1;
478 if (strncmp (optstr, "no-", 3) == 0)
480 negate = 1;
481 optstr = & optstr[3];
484 for (opts = options; opts->name; opts++)
486 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
488 optstr += strlen (opts->name);
489 assert (opts->target);
490 switch (opts->type)
492 case set_option:
493 if (negate)
494 *(opts->target) = 0;
495 else
496 *(opts->target) = opts->value;
497 break;
498 case read_integer_option:
499 if (! negate && (*optstr == '=' && *(optstr+1)))
501 optstr++;
502 tmp = strtol (optstr, &nxt, 10);
503 if ((optstr != nxt) && (tmp != LONG_MAX))
505 optstr = nxt;
506 *(opts->target) = (int)tmp;
509 else if (negate)
510 * opts->target = 0;
511 break;
516 break;
518 default:
519 fprintf (stderr,
520 "warning: unrecognized string '%s' in mudflap options\n",
521 optstr);
522 optstr += strlen (optstr);
523 rc = -1;
524 break;
528 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
529 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
530 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
532 /* Clear the lookup cache, in case the parameters got changed. */
533 /* XXX: race */
534 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
535 /* void slot 0 */
536 __mf_lookup_cache[0].low = MAXPTR;
538 TRACE ("set options from `%s'\n", saved_optstr);
540 /* Call this unconditionally, in case -sigusr1-report was toggled. */
541 __mf_sigusr1_respond ();
543 return rc;
547 #ifdef PIC
549 void
550 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
552 char *err;
554 assert (e);
555 if (e->pointer) return;
557 #if HAVE_DLVSYM
558 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
559 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
560 else
561 #endif
562 e->pointer = dlsym (RTLD_NEXT, e->name);
564 err = dlerror ();
566 if (err)
568 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
569 e->name, err);
570 abort ();
572 if (! e->pointer)
574 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
575 abort ();
580 static void
581 __mf_resolve_dynamics ()
583 int i;
584 for (i = 0; i < dyn_INITRESOLVE; i++)
585 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
589 /* NB: order must match enums in mf-impl.h */
590 struct __mf_dynamic_entry __mf_dynamic [] =
592 {NULL, "calloc", NULL},
593 {NULL, "free", NULL},
594 {NULL, "malloc", NULL},
595 {NULL, "mmap", NULL},
596 {NULL, "munmap", NULL},
597 {NULL, "realloc", NULL},
598 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
599 #ifdef LIBMUDFLAPTH
600 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
601 {NULL, "pthread_join", NULL},
602 {NULL, "pthread_exit", NULL}
603 #endif
606 #endif /* PIC */
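/* A self-contained sketch of the same RTLD_NEXT technique for a single
   symbol, for illustration only (the runtime drives it from the
   __mf_dynamic table above):

     #define _GNU_SOURCE
     #include <dlfcn.h>
     #include <stddef.h>

     static void *(*real_malloc) (size_t);

     void *malloc (size_t sz)
     {
       if (real_malloc == NULL)
         real_malloc = (void *(*)(size_t)) dlsym (RTLD_NEXT, "malloc");
       return real_malloc (sz);
     }

   Loaded via LD_PRELOAD, such a shim forwards to the next definition
   of malloc in link order, exactly as the wrappers here do. */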
610 /* ------------------------------------------------------------------------ */
613 void __mf_init ()
615 char *ov = 0;
617 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
618 #ifdef PIC
619 __mf_resolve_dynamics ();
620 #endif
621 __mf_starting_p = 0;
623 __mf_set_default_options ();
625 ov = getenv ("MUDFLAP_OPTIONS");
626 if (ov)
628 int rc = __mfu_set_options (ov);
629 if (rc < 0)
631 __mf_usage ();
632 exit (1);
636 /* Initialize to a non-zero description epoch. */
637 __mf_describe_object (NULL);
639 #define REG_RESERVED(obj) \
640 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
642 REG_RESERVED (__mf_lookup_cache);
643 REG_RESERVED (__mf_lc_mask);
644 REG_RESERVED (__mf_lc_shift);
645 /* XXX: others of our statics? */
647 /* Prevent access to *NULL. */
648 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
649 __mf_lookup_cache[0].low = (uintptr_t) -1;
655 int __wrap_main (int argc, char* argv[])
657 extern char **environ;
658 extern int main ();
659 static int been_here = 0;
661 if (__mf_opts.heur_std_data && ! been_here)
663 unsigned i;
665 been_here = 1;
666 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
667 for (i=0; i<argc; i++)
669 unsigned j = strlen (argv[i]);
670 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
673 for (i=0; ; i++)
675 char *e = environ[i];
676 unsigned j;
677 if (e == NULL) break;
678 j = strlen (environ[i]);
679 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
681 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
683 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
685 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
686 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
687 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
690 #ifdef PIC
691 return main (argc, argv, environ);
692 #else
693 return __real_main (argc, argv, environ);
694 #endif
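/* __wrap_main is reached through the GNU linker's symbol wrapping; a
   non-PIC link along these lines (illustrative flags only):

     gcc -fmudflap -c app.c
     gcc app.o -lmudflap -Wl,--wrap,main

   makes every reference to main resolve to __wrap_main, while
   __real_main names the original. The PIC branch above calls main
   directly instead. */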
699 extern void __mf_fini () DTOR;
700 void __mf_fini ()
702 TRACE ("__mf_fini\n");
703 __mfu_report ();
708 /* ------------------------------------------------------------------------ */
709 /* __mf_check */
711 void __mf_check (void *ptr, size_t sz, int type, const char *location)
713 LOCKTH ();
714 BEGIN_RECURSION_PROTECT ();
715 __mfu_check (ptr, sz, type, location);
716 END_RECURSION_PROTECT ();
717 UNLOCKTH ();
721 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
723 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
724 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
725 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
726 uintptr_t ptr_low = (uintptr_t) ptr;
727 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
728 struct __mf_cache old_entry = *entry;
730 if (UNLIKELY (__mf_opts.sigusr1_report))
731 __mf_sigusr1_respond ();
733 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
734 ptr, entry_idx, (unsigned long)sz,
735 (type == 0 ? "read" : "write"), location);
737 switch (__mf_opts.mudflap_mode)
739 case mode_nop:
740 judgement = 1;
741 break;
743 case mode_populate:
744 entry->low = ptr_low;
745 entry->high = ptr_high;
746 judgement = 1;
747 break;
749 case mode_check:
751 unsigned heuristics = 0;
753 /* Advance aging/adaptation counters. */
754 if (__mf_object_root)
756 static unsigned aging_count;
757 static unsigned adapt_count;
758 aging_count ++;
759 adapt_count ++;
760 if (UNLIKELY (__mf_opts.tree_aging > 0 &&
761 aging_count > __mf_opts.tree_aging))
763 aging_count = 0;
764 __mf_age_tree (__mf_object_root);
766 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
767 adapt_count > __mf_opts.adapt_cache))
769 adapt_count = 0;
770 __mf_adapt_cache ();
774 /* Looping only occurs if heuristics were triggered. */
775 while (judgement == 0)
777 __mf_object_tree_t* ovr_obj[1];
778 unsigned obj_count;
780 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
782 if (LIKELY (obj_count == 1)) /* A single hit! */
784 __mf_object_t *obj = & ovr_obj[0]->data;
785 assert (obj != NULL);
786 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
788 /* XXX: hope for no overflow! */
789 if (type == __MF_CHECK_READ)
790 obj->read_count ++;
791 else
792 obj->write_count ++;
794 obj->liveness ++;
796 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
797 judgement = -1;
798 else if (UNLIKELY (obj->watching_p))
799 judgement = -2; /* trigger VIOL_WATCH */
800 else if (UNLIKELY (__mf_opts.check_initialization
801 /* reading */
802 && type == __MF_CHECK_READ
803 /* not written */
804 && obj->write_count == 0
805 /* uninitialized (heap) */
806 && obj->type == __MF_TYPE_HEAP))
807 judgement = -1;
808 else
810 /* Valid access. */
811 entry->low = obj->low;
812 entry->high = obj->high;
813 judgement = 1;
816 /* The object did not cover the entire accessed region. */
818 else if (LIKELY (obj_count > 1))
820 __mf_object_tree_t **all_ovr_objs;
821 unsigned n;
822 DECLARE (void *, malloc, size_t c);
823 DECLARE (void, free, void *p);
825 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
826 obj_count));
827 if (all_ovr_objs == NULL) abort ();
828 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
829 assert (n == obj_count);
831           /* Confirm that the accessed range is covered by the first/last object. */
832 if (LIKELY ((ptr_low >= all_ovr_objs[0]->data.low) &&
833 (ptr_high <= all_ovr_objs[obj_count-1]->data.high)))
835 /* Presume valid access. */
836 judgement = 1;
838 /* Confirm that intermediate objects are
839 contiguous and share a single name. Thus they
840 are likely split up GUESS regions, or mmap
841 pages. The idea of the name check is to
842 prevent an oversize access to a
843 stack-registered object (followed by some GUESS
844 type) from being accepted as a hit. */
845 for (n=0; n<obj_count-1; n++)
847 __mf_object_t *obj = & (all_ovr_objs[n]->data);
848 __mf_object_t *nextobj = & (all_ovr_objs[n+1]->data);
850 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
851 judgement = -1; /* Force error. */
853 if (UNLIKELY (judgement == 1 &&
854 (obj->high + 1 != nextobj->low)))
855 judgement = 0; /* Cancel presumption. */
857 if (UNLIKELY (judgement == 1 &&
858 (obj->name != nextobj->name)))
859 judgement = 0; /* Cancel presumption. */
860                   /* NB: a strcmp is not necessary here, since the
861                      same literal string pointer is normally
862                      used when creating regions. */
864 /* XXX: hope for no overflow! */
865 if (type == __MF_CHECK_READ)
866 obj->read_count ++;
867 else
868 obj->write_count ++;
869 obj->liveness ++;
872 /* If the access is otherwise successful, check whether
873 any of the covered objects are being watched. */
874 if (judgement > 0)
876 unsigned i;
877 for (i=0; i<obj_count; i++)
878 if (all_ovr_objs[i]->data.watching_p)
879 judgement = -2; /* trigger VIOL_WATCH */
882 /* Check for uninitialized reads. */
883 if (judgement > 0 &&
884 __mf_opts.check_initialization &&
885 type == __MF_CHECK_READ)
887 unsigned i;
888 unsigned written_count = 0;
890 for (i=0; i<obj_count; i++)
892 __mf_object_t *obj = & all_ovr_objs[i]->data;
894 if (obj->write_count
895 || obj->type == __MF_TYPE_HEAP_I
896 || obj->type == __MF_TYPE_GUESS)
897 written_count ++;
900 /* Check for ALL pieces having been written-to.
901 XXX: should this be ANY instead? */
902 if (written_count != obj_count)
903 judgement = -1;
906 /* Fill out the cache with the bounds of the first
907 object and the last object that covers this
908 cache line (== includes the same __MF_CACHE_INDEX).
909 This could let this cache line span *two* distinct
910 registered objects: a peculiar but reasonable
911 situation. The cache line may not include the
912 entire object though. */
913 if (judgement > 0)
915 unsigned i;
916 entry->low = all_ovr_objs[0]->data.low;
917 for (i=0; i<obj_count; i++)
919 uintptr_t high = all_ovr_objs[i]->data.high;
920 if (__MF_CACHE_INDEX (high) == entry_idx)
921 entry->high = high;
926 CALL_REAL (free, all_ovr_objs);
929 if (judgement == 0)
931 if (heuristics++ < 2) /* XXX parametrize this number? */
932 judgement = __mf_heuristic_check (ptr_low, ptr_high);
933 else
934 judgement = -1;
939 break;
941 case mode_violate:
942 judgement = -1;
943 break;
946 if (__mf_opts.collect_stats)
948 __mf_count_check ++;
950 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
951 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
952 __mf_lookup_cache_reusecount [entry_idx] ++;
955 if (UNLIKELY (judgement < 0))
956 __mf_violation (ptr, sz,
957 (uintptr_t) __builtin_return_address (0), location,
958 ((judgement == -1) ?
959 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
960 __MF_VIOL_WATCH));
964 static __mf_object_tree_t *
965 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
966 const char *name, uintptr_t pc)
968 DECLARE (void *, calloc, size_t c, size_t n);
970 __mf_object_tree_t *new_obj;
971 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_tree_t));
972 new_obj->data.low = low;
973 new_obj->data.high = high;
974 new_obj->data.type = type;
975 new_obj->data.name = name;
976 new_obj->data.alloc_pc = pc;
977 #if HAVE_GETTIMEOFDAY
978 gettimeofday (& new_obj->data.alloc_time, NULL);
979 #endif
980 #if LIBMUDFLAPTH
981 new_obj->data.alloc_thread = pthread_self ();
982 #endif
984 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
985 new_obj->data.alloc_backtrace_size =
986 __mf_backtrace (& new_obj->data.alloc_backtrace,
987 (void *) pc, 2);
989 __mf_link_object (new_obj);
990 return new_obj;
994 static void
995 __mf_uncache_object (__mf_object_t *old_obj)
997 /* Remove any low/high pointers for this object from the lookup cache. */
999 /* Can it possibly exist in the cache? */
1000 if (LIKELY (old_obj->read_count + old_obj->write_count))
1002 uintptr_t low = old_obj->low;
1003 uintptr_t high = old_obj->high;
1004 unsigned idx_low = __MF_CACHE_INDEX (low);
1005 unsigned idx_high = __MF_CACHE_INDEX (high);
1006 unsigned i;
1007 for (i = idx_low; i <= idx_high; i++)
1009 struct __mf_cache *entry = & __mf_lookup_cache [i];
1010 /* NB: the "||" in the following test permits this code to
1011 tolerate the situation introduced by __mf_check over
1012 contiguous objects, where a cache entry spans several
1013 objects. */
1014 if (entry->low == low || entry->high == high)
1016 entry->low = MAXPTR;
1017 entry->high = MINPTR;
1024 void
1025 __mf_register (void *ptr, size_t sz, int type, const char *name)
1027 LOCKTH ();
1028 BEGIN_RECURSION_PROTECT ();
1029 __mfu_register (ptr, sz, type, name);
1030 END_RECURSION_PROTECT ();
1031 UNLOCKTH ();
1035 void
1036 __mfu_register (void *ptr, size_t sz, int type, const char *name)
1038 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1039 ptr, (unsigned long) sz, type, name ? name : "");
1041 if (__mf_opts.collect_stats)
1043 __mf_count_register ++;
1044 __mf_total_register_size [(type < 0) ? 0 :
1045 (type > __MF_TYPE_MAX) ? 0 :
1046 type] += sz;
1049 if (UNLIKELY (__mf_opts.sigusr1_report))
1050 __mf_sigusr1_respond ();
1052 switch (__mf_opts.mudflap_mode)
1054 case mode_nop:
1055 break;
1057 case mode_violate:
1058 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1059 __MF_VIOL_REGISTER);
1060 break;
1062 case mode_populate:
1063 /* Clear the cache. */
1064 /* XXX: why the entire cache? */
1065 /* XXX: race */
1066 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1067 /* void slot 0 */
1068 __mf_lookup_cache[0].low = MAXPTR;
1069 break;
1071 case mode_check:
1073 __mf_object_tree_t *ovr_objs [1];
1074 unsigned num_overlapping_objs;
1075 uintptr_t low = (uintptr_t) ptr;
1076 uintptr_t high = CLAMPSZ (ptr, sz);
1077 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1079 /* Treat unknown size indication as 1. */
1080 if (UNLIKELY (sz == 0)) sz = 1;
1082 num_overlapping_objs = __mf_find_objects (low, high, ovr_objs, 1);
1084 /* Handle overlaps. */
1085 if (UNLIKELY (num_overlapping_objs > 0))
1087 __mf_object_tree_t *ovr_obj = ovr_objs[0];
1089 /* Quietly accept a single duplicate registration for
1090 static objects, since these may come from distinct
1091 compilation units. */
1092 if (type == __MF_TYPE_STATIC
1093 && ovr_obj->data.type == __MF_TYPE_STATIC
1094 && ovr_obj->data.low == low
1095 && ovr_obj->data.high == high)
1097 /* do nothing */
1098 VERBOSE_TRACE ("duplicate static reg %p-%p `%s'\n",
1099 (void *) low, (void *) high,
1100 (ovr_obj->data.name ? ovr_obj->data.name : ""));
1101 break;
1104 /* Quietly accept a single duplicate registration for
1105 guess objects too. */
1106 if (type == __MF_TYPE_GUESS &&
1107 ovr_obj->data.type == __MF_TYPE_GUESS &&
1108 ovr_obj->data.low == low &&
1109 ovr_obj->data.high == high)
1111 /* do nothing */
1112 VERBOSE_TRACE ("duplicate guess reg %p-%p\n",
1113 (void *) low, (void *) high);
1114 break;
1117         /* Quietly accept a new guess registration that overlaps
1118            at least one existing object. Trim it down to size. */
1119 else if (type == __MF_TYPE_GUESS)
1121 /* We need to split this new GUESS region into some
1122 smaller ones. Or we may not need to insert it at
1123 all if it is covered by the overlapping region. */
1125 /* First, identify all the overlapping objects. */
1126 __mf_object_tree_t **all_ovr_objs;
1127 unsigned num_ovr_objs, n;
1128 uintptr_t next_low;
1129 DECLARE (void *, malloc, size_t c);
1130 DECLARE (void, free, void *p);
1132 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
1133 num_overlapping_objs));
1134 if (all_ovr_objs == NULL) abort ();
1135 num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
1136 num_overlapping_objs);
1137 assert (num_ovr_objs == num_overlapping_objs);
1139 VERBOSE_TRACE ("splitting guess %p-%p, # overlaps: %u\n",
1140 (void *) low, (void *) high, num_ovr_objs);
1142             /* Add GUESS regions in the holes: one before each
1143                overlapping region. */
1145 next_low = low;
1146 /* This makes use of the assumption that __mf_find_objects() returns
1147 overlapping objects in an increasing sequence. */
1148 for (n=0; n < min (num_ovr_objs, num_overlapping_objs); n++)
1150 if (all_ovr_objs[n]->data.low > next_low) /* Gap? */
1152 uintptr_t next_high = CLAMPSUB (all_ovr_objs[n]->data.low, 1);
1153 __mfu_register ((void *) next_low, next_high-next_low+1,
1154 __MF_TYPE_GUESS, name);
1156 next_low = CLAMPADD (all_ovr_objs[n]->data.high, 1);
1158 /* Add in any leftover room at the top. */
1159 if (next_low <= high)
1160 __mfu_register ((void *) next_low, high-next_low+1,
1161 __MF_TYPE_GUESS, name);
1163 /* XXX: future optimization: allow consecutive GUESS regions to
1164 be glued together. */
1165 CALL_REAL (free, all_ovr_objs);
1166 return;
1169 /* Quietly accept a non-GUESS region overlaying a GUESS
1170 region. Handle it by removing the GUESS region
1171 temporarily, then recursively adding this new object,
1172 and then the GUESS back. The latter will be split up
1173 by the recursive process above. */
1174 else if (ovr_obj->data.type == __MF_TYPE_GUESS)
1176 uintptr_t old_low = ovr_obj->data.low;
1177 uintptr_t old_high = ovr_obj->data.high;
1178 const char* old_name = ovr_obj->data.name;
1180             /* Now recursively remove the guess piece and
1181                reinsert the pieces in the opposite order. Recursion
1182 should bottom out if another non-GUESS overlapping
1183 region is found for this new object (resulting in a
1184 violation), or if no further overlap occurs. The
1185 located GUESS region should end up being split up
1186 in any case. */
1187 __mfu_unregister ((void *) old_low, old_high-old_low+1);
1188 __mfu_register ((void *) low, sz, type, name);
1189 __mfu_register ((void *) old_low, old_high-old_low+1,
1190 __MF_TYPE_GUESS, old_name);
1191 return;
1194 /* Alas, a genuine violation. */
1195 else
1197 /* Two or more *real* mappings here. */
1198 __mf_violation ((void *) ptr, sz,
1199 (uintptr_t) __builtin_return_address (0), NULL,
1200 __MF_VIOL_REGISTER);
1204 /* No overlapping objects: AOK. */
1205 else
1207 __mf_insert_new_object (low, high, type, name, pc);
1210 /* We could conceivably call __mf_check() here to prime the cache,
1211        but then the read_count/write_count fields would not be reliable. */
1213 break;
1215 } /* end switch (__mf_opts.mudflap_mode) */
1219 void
1220 __mf_unregister (void *ptr, size_t sz)
1222 LOCKTH ();
1223 BEGIN_RECURSION_PROTECT ();
1224 __mfu_unregister (ptr, sz);
1225 END_RECURSION_PROTECT ();
1226 UNLOCKTH ();
1230 void
1231 __mfu_unregister (void *ptr, size_t sz)
1233 DECLARE (void, free, void *ptr);
1235 if (UNLIKELY (__mf_opts.sigusr1_report))
1236 __mf_sigusr1_respond ();
1238 TRACE ("unregister ptr=%p size=%lu\n", ptr, (unsigned long) sz);
1240 switch (__mf_opts.mudflap_mode)
1242 case mode_nop:
1243 break;
1245 case mode_violate:
1246 __mf_violation (ptr, sz,
1247 (uintptr_t) __builtin_return_address (0), NULL,
1248 __MF_VIOL_UNREGISTER);
1249 break;
1251 case mode_populate:
1252 /* Clear the cache. */
1253 /* XXX: race */
1254 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1255 /* void slot 0 */
1256 __mf_lookup_cache[0].low = MAXPTR;
1257 break;
1259 case mode_check:
1261 __mf_object_tree_t *old_obj = NULL;
1262 __mf_object_tree_t *del_obj = NULL; /* Object to actually delete. */
1263 __mf_object_tree_t *objs[1] = {NULL};
1264 unsigned num_overlapping_objs;
1266 /* Treat unknown size indication as 1. */
1267 if (sz == 0) sz = 1;
1269 num_overlapping_objs = __mf_find_objects ((uintptr_t) ptr,
1270 CLAMPSZ (ptr, sz), objs, 1);
1272       /* XXX: handle unregistration of a big old GUESS region that has since
1273          been splintered. */
1274 old_obj = objs[0];
1276 if (UNLIKELY (num_overlapping_objs != 1 ||
1277 (uintptr_t)ptr != old_obj->data.low)) /* XXX: what about sz? */
1279 __mf_violation (ptr, sz,
1280 (uintptr_t) __builtin_return_address (0), NULL,
1281 __MF_VIOL_UNREGISTER);
1282 break;
1285 __mf_unlink_object (old_obj);
1286 __mf_uncache_object (& old_obj->data);
1288 /* Wipe buffer contents if desired. */
1289 if ((__mf_opts.wipe_stack && old_obj->data.type == __MF_TYPE_STACK)
1290 || (__mf_opts.wipe_heap && (old_obj->data.type == __MF_TYPE_HEAP
1291 || old_obj->data.type == __MF_TYPE_HEAP_I)))
1293 memset ((void *) old_obj->data.low,
1295 (size_t) (old_obj->data.high - old_obj->data.low + 1));
1298       /* Manage the object cemetery. */
1299 if (__mf_opts.persistent_count > 0 &&
1300 old_obj->data.type >= 0 &&
1301 old_obj->data.type <= __MF_TYPE_MAX_CEM)
1303 old_obj->data.deallocated_p = 1;
1304 old_obj->left = old_obj->right = NULL;
1305 old_obj->data.dealloc_pc = (uintptr_t) __builtin_return_address (0);
1306 #if HAVE_GETTIMEOFDAY
1307 gettimeofday (& old_obj->data.dealloc_time, NULL);
1308 #endif
1309 #ifdef LIBMUDFLAPTH
1310 old_obj->data.dealloc_thread = pthread_self ();
1311 #endif
1313 if (__mf_opts.backtrace > 0 && old_obj->data.type == __MF_TYPE_HEAP)
1314 old_obj->data.dealloc_backtrace_size =
1315 __mf_backtrace (& old_obj->data.dealloc_backtrace,
1316 NULL, 2);
1318 /* Encourage this object to be displayed again in current epoch. */
1319 old_obj->data.description_epoch --;
1321          /* Put this object into the cemetery. This may require this plot to
1322 be recycled, and the previous resident to be designated del_obj. */
1324 unsigned row = old_obj->data.type;
1325 unsigned plot = __mf_object_dead_head [row];
1327 del_obj = __mf_object_cemetary [row][plot];
1328 __mf_object_cemetary [row][plot] = old_obj;
1329 plot ++;
1330 if (plot == __mf_opts.persistent_count) plot = 0;
1331 __mf_object_dead_head [row] = plot;
1334 else
1335 del_obj = old_obj;
1337 if (__mf_opts.print_leaks)
1339 if ((old_obj->data.read_count + old_obj->data.write_count) == 0 &&
1340 (old_obj->data.type == __MF_TYPE_HEAP
1341 || old_obj->data.type == __MF_TYPE_HEAP_I))
1343 fprintf (stderr,
1344 "*******\n"
1345 "mudflap warning: unaccessed registered object:\n");
1346 __mf_describe_object (& old_obj->data);
1350 if (del_obj != NULL) /* May or may not equal old_obj. */
1352 if (__mf_opts.backtrace > 0)
1354 CALL_REAL(free, del_obj->data.alloc_backtrace);
1355 if (__mf_opts.persistent_count > 0)
1357 CALL_REAL(free, del_obj->data.dealloc_backtrace);
1360 CALL_REAL(free, del_obj);
1363 break;
1365 } /* end switch (__mf_opts.mudflap_mode) */
1368 if (__mf_opts.collect_stats)
1370 __mf_count_unregister ++;
1371 __mf_total_unregister_size += sz;
1375 /* ------------------------------------------------------------------------ */
1376 /* __mf_validate_live_object_tree, _object_cemetary */
1378 static void
1379 __mf_validate_live_object_tree (__mf_object_tree_t *obj)
1381 assert (obj != NULL);
1383 if (__mf_opts.persistent_count > 0)
1384 assert (! obj->data.deallocated_p);
1386 if (obj->left)
1388 assert (obj->left->data.high < obj->data.low);
1389 __mf_validate_live_object_tree (obj->left);
1391 if (obj->right)
1393 assert (obj->right->data.low > obj->data.high);
1394 __mf_validate_live_object_tree (obj->right);
1398 static void
1399 __mf_validate_object_cemetary ()
1401 unsigned cls;
1402 unsigned i;
1404 for (cls = 0; cls <= __MF_TYPE_MAX_CEM; cls++)
1406 assert (__mf_object_dead_head [cls] >= 0 &&
1407 __mf_object_dead_head [cls] < __mf_opts.persistent_count);
1408 for (i = 0; i < __mf_opts.persistent_count; i++)
1410 __mf_object_tree_t *obj = __mf_object_cemetary [cls][i];
1411 if (obj != NULL)
1413 assert (obj->data.deallocated_p);
1414 assert (obj->left == NULL);
1415 assert (obj->right == NULL);
1421 static void
1422 __mf_validate_objects ()
1424 if (__mf_object_root)
1425 __mf_validate_live_object_tree (__mf_object_root);
1427 if (__mf_opts.persistent_count > 0)
1428 __mf_validate_object_cemetary ();
1432 static void
1433 __mf_age_tree (__mf_object_tree_t *obj)
1435 assert (obj != NULL);
1436 obj->data.liveness = obj->data.liveness >> 1;
1438 if (obj->left)
1439 __mf_age_tree (obj->left);
1440 if (obj->right)
1441 __mf_age_tree (obj->right);
1446 struct tree_stats
1448 unsigned obj_count;
1449 unsigned long total_size;
1450 unsigned live_obj_count;
1451 double total_weight;
1452 double weighted_size;
1453 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
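/* weighted_address_bits[i][b] accumulates the liveness of objects whose
   low-address bit i has value b; __mf_adapt_cache looks for a bit whose
   weight is split evenly between 0 and 1, which makes it a good
   cache-index discriminant. */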
1457 static void
1458 __mf_tree_analyze (__mf_object_tree_t *obj, struct tree_stats* s)
1460 assert (obj != NULL);
1462 if (obj->left)
1463 __mf_tree_analyze (obj->left, s);
1465 /* Exclude never-accessed objects. */
1466 if (obj->data.read_count + obj->data.write_count)
1468 s->obj_count ++;
1469 s->total_size += (obj->data.high - obj->data.low + 1);
1471 if (obj->data.liveness)
1473 unsigned i;
1474 uintptr_t addr;
1476 VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1477 (void *) obj->data.low, obj->data.liveness, obj->data.name);
1479 s->live_obj_count ++;
1480 s->total_weight += (double) obj->data.liveness;
1481 s->weighted_size +=
1482 (double) (obj->data.high - obj->data.low + 1) *
1483 (double) obj->data.liveness;
1485 addr = obj->data.low;
1486 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1488 unsigned bit = addr & 1;
1489 s->weighted_address_bits[i][bit] += obj->data.liveness;
1490 addr = addr >> 1;
1495 if (obj->right)
1496 __mf_tree_analyze (obj->right, s);
1500 static void
1501 __mf_adapt_cache ()
1503 struct tree_stats s;
1504 uintptr_t new_mask = 0;
1505 unsigned char new_shift;
1506 float cache_utilization;
1507 float max_value;
1508 static float smoothed_new_shift = -1.0;
1509 unsigned i;
1511 memset (&s, 0, sizeof (s));
1512 if (__mf_object_root)
1513 __mf_tree_analyze (__mf_object_root, & s);
1515 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1516 empty tree. Just leave the cache alone in such cases, rather
1517 than risk dying by division-by-zero. */
1518   if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
1519     return;
1521 /* Guess a good value for the shift parameter by finding an address bit that is a
1522 good discriminant of lively objects. */
1523 max_value = 0.0;
1524 for (i=0; i<sizeof (uintptr_t)*8; i++)
1526 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1527 if (max_value < value) max_value = value;
1529 for (i=0; i<sizeof (uintptr_t)*8; i++)
1531 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1532 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1533 if (value >= max_value * shoulder_factor)
1534 break;
1536 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1537 /* Converge toward this slowly to reduce flapping. */
1538 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
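  /* i.e. an exponentially weighted moving average of the chosen bit
     index, with weight 0.1 on the new sample. */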
1539 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1540 assert (new_shift < sizeof (uintptr_t)*8);
1542 /* Count number of used buckets. */
1543 cache_utilization = 0.0;
1544 for (i = 0; i < (1 + __mf_lc_mask); i++)
1545 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1546 cache_utilization += 1.0;
1547 cache_utilization /= (1 + __mf_lc_mask);
1549 new_mask |= 0x3ff; /* XXX: force a large cache. */
1550 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1552 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1553 "util=%u%% m=%p s=%u\n",
1554 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1555 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1557   /* We should reinitialize the cache if its parameters have changed. */
1558 if (new_mask != __mf_lc_mask ||
1559 new_shift != __mf_lc_shift)
1561 __mf_lc_mask = new_mask;
1562 __mf_lc_shift = new_shift;
1563 /* XXX: race */
1564 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1565 /* void slot 0 */
1566 __mf_lookup_cache[0].low = MAXPTR;
1573 /* __mf_find_object[s] */
1575 /* Find overlapping live objects between [low,high]. Return up to
1576 max_objs of their pointers in objs[]. Return total count of
1577 overlaps (may exceed max_objs). */
1579 /* XXX: track traversal statistics, like average depth, balance. */
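/* Usage note: calling with max_objs == 0 and objs == NULL yields a pure
   count, as __mfu_report does:

     unsigned live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);

   while most checking paths pass a one-element array just to obtain a
   witness object. */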
1581 static unsigned
1582 __mf_find_objects_rec (uintptr_t low, uintptr_t high, __mf_object_tree_t **nodep,
1583 __mf_object_tree_t **objs, unsigned max_objs)
1585 unsigned count;
1586 __mf_object_tree_t *node = *nodep;
1588 assert (low <= high);
1589 assert (max_objs == 0 || objs != NULL);
1591 if (UNLIKELY (node == NULL)) return 0;
1593 /* Traverse down left subtree. */
1594 count = 0;
1595 if (low < node->data.low)
1596 count += __mf_find_objects_rec (low, min(high, node->data.low),
1597 & node->left, objs, max_objs);
1599 /* Track the used slots of objs[]. */
1600 if (count < max_objs)
1602 objs += count;
1603 max_objs -= count;
1605 else
1607 max_objs = 0;
1610 /* Check for overlap with this node. */
1611 if (high >= node->data.low && low <= node->data.high)
1613 count ++;
1614 if (max_objs > 0) /* Any room left? */
1616 objs[0] = node;
1617 objs ++;
1618 max_objs --;
1622 /* Traverse down right subtree. */
1623 if (high > node->data.high)
1624 count += __mf_find_objects_rec (max (low, node->data.high), high,
1625 & node->right, objs, max_objs);
1626 /* There is no need to manipulate objs/max_objs any further. */
1629 /* Rotate a child node up if its access count is higher. */
1630 if (UNLIKELY ((node->left && node->left->data.liveness > node->data.liveness) &&
1631 ((!node->right || (node->right &&
1632 node->left->data.liveness >
1633 node->right->data.liveness)))))
1635 __mf_object_tree_t *l = node->left;
1636 __mf_object_tree_t *l_r = l->right;
1638 *nodep = l;
1639 l->right = node;
1640 node->left = l_r;
1641 __mf_treerot_left ++;
1643 else if (UNLIKELY ((node->right && node->right->data.liveness > node->data.liveness) &&
1644 ((!node->left || (node->left &&
1645 node->right->data.liveness >
1646 node->left->data.liveness)))))
1648 __mf_object_tree_t *r = node->right;
1649 __mf_object_tree_t *r_l = r->left;
1651 *nodep = r;
1652 r->left = node;
1653 node->right = r_l;
1654 __mf_treerot_right ++;
1657 return count;
1661 unsigned
1662 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1663 __mf_object_tree_t **objs, unsigned max_objs)
1665 if (UNLIKELY(__mf_opts.internal_checking))
1666 __mf_validate_objects ();
1668 return __mf_find_objects_rec (ptr_low, ptr_high, & __mf_object_root, objs, max_objs);
1671 /* __mf_link_object */
1673 static void
1674 __mf_link_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
1676 __mf_object_tree_t *node = *link;
1678 assert (ptr != NULL);
1679 if (UNLIKELY(node == NULL))
1681 *link = ptr;
1682 return;
1685 if (ptr->data.high < node->data.low)
1686 return __mf_link_object2 (ptr, & node->left);
1687 else if (ptr->data.low > node->data.high)
1688 return __mf_link_object2 (ptr, & node->right);
1689 else
1690 abort (); /* XXX: duplicate object */
1694 void
1695 __mf_link_object (__mf_object_tree_t *ptr)
1697 if (UNLIKELY(__mf_opts.internal_checking))
1698 __mf_validate_objects ();
1700 return __mf_link_object2 (ptr, & __mf_object_root);
1703 /* __mf_unlink_object */
1705 static void
1706 __mf_unlink_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
1708 __mf_object_tree_t *node = *link;
1710 assert (ptr != NULL);
1711 if (UNLIKELY(node == ptr))
1713 static unsigned promote_left_p = 0;
1714 promote_left_p = 1 - promote_left_p;
1716 /* Alternate promoting the left & right subtrees. */
1717 if (promote_left_p)
1719 *link = ptr->left;
1720 if (ptr->right != NULL)
1721 __mf_link_object2 (ptr->right, link);
1723 else
1725 *link = ptr->right;
1726 if (ptr->left != NULL)
1727 __mf_link_object2 (ptr->left, link);
1730 return;
1733 if (ptr->data.high < node->data.low)
1734 return __mf_unlink_object2 (ptr, & node->left);
1735 else if (ptr->data.low > node->data.high)
1736 return __mf_unlink_object2 (ptr, & node->right);
1737 else
1738 abort (); /* XXX: missing object; should fail more gracefully. */
1741 static void
1742 __mf_unlink_object (__mf_object_tree_t *node)
1744 __mf_unlink_object2 (node, & __mf_object_root);
1747 /* __mf_find_dead_objects */
1749 /* Find overlapping dead objects between [low,high]. Return up to
1750 max_objs of their pointers in objs[]. Return total count of
1751 overlaps (may exceed max_objs). */
1753 static unsigned
1754 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1755 __mf_object_tree_t **objs, unsigned max_objs)
1757 if (__mf_opts.persistent_count > 0)
1759 unsigned count = 0;
1760 unsigned recollection = 0;
1761 unsigned row = 0;
1763 assert (low <= high);
1764 assert (max_objs == 0 || objs != NULL);
1766 /* Widen the search from the most recent plots in each row, looking
1767 backward in time. */
1768 recollection = 0;
1769 while (recollection < __mf_opts.persistent_count)
1771 count = 0;
1773 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1775 unsigned plot;
1776 unsigned i;
1778 plot = __mf_object_dead_head [row];
1779 for (i = 0; i <= recollection; i ++)
1781 __mf_object_tree_t *obj;
1783 /* Look backward through row: it's a circular buffer. */
1784 if (plot > 0) plot --;
1785 else plot = __mf_opts.persistent_count - 1;
1787 obj = __mf_object_cemetary [row][plot];
1788 if (obj && obj->data.low <= high && obj->data.high >= low)
1790 /* Found an overlapping dead object! */
1791 if (count < max_objs)
1792 objs [count] = obj;
1793 count ++;
1798 if (count)
1799 break;
1801 /* Look farther back in time. */
1802 recollection = (recollection * 2) + 1;
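          /* i.e. examine 1, 2, 4, 8, ... plots per row on successive passes. */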
1805 return count;
1806 } else {
1807 return 0;
1811 /* __mf_describe_object */
1813 static void
1814 __mf_describe_object (__mf_object_t *obj)
1816 static unsigned epoch = 0;
1817 if (obj == NULL)
1819 epoch ++;
1820 return;
1823 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1825 fprintf (stderr,
1826 "mudflap object %p: name=`%s'\n",
1827 (void *) obj, (obj->name ? obj->name : ""));
1828 return;
1830 else
1831 obj->description_epoch = epoch;
1833 fprintf (stderr,
1834 "mudflap object %p: name=`%s'\n"
1835 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1836 "alloc time=%lu.%06lu pc=%p"
1837 #ifdef LIBMUDFLAPTH
1838 " thread=%u"
1839 #endif
1840 "\n",
1841 (void *) obj, (obj->name ? obj->name : ""),
1842 (void *) obj->low, (void *) obj->high,
1843 (unsigned long) (obj->high - obj->low + 1),
1844 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1845 obj->type == __MF_TYPE_HEAP ? "heap" :
1846 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1847 obj->type == __MF_TYPE_STACK ? "stack" :
1848 obj->type == __MF_TYPE_STATIC ? "static" :
1849 obj->type == __MF_TYPE_GUESS ? "guess" :
1850 "unknown"),
1851 obj->read_count, obj->write_count, obj->liveness,
1852 obj->watching_p ? " watching" : "",
1853 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1854 (void *) obj->alloc_pc
1855 #ifdef LIBMUDFLAPTH
1856 , (unsigned) obj->alloc_thread
1857 #endif
1860 if (__mf_opts.backtrace > 0)
1862 unsigned i;
1863 for (i=0; i<obj->alloc_backtrace_size; i++)
1864 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
1867 if (__mf_opts.persistent_count > 0)
1869 if (obj->deallocated_p)
1871 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1872 #ifdef LIBMUDFLAPTH
1873 " thread=%u"
1874 #endif
1875 "\n",
1876 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1877 (void *) obj->dealloc_pc
1878 #ifdef LIBMUDFLAPTH
1879 , (unsigned) obj->dealloc_thread
1880 #endif
1884 if (__mf_opts.backtrace > 0)
1886 unsigned i;
1887 for (i=0; i<obj->dealloc_backtrace_size; i++)
1888 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1894 static unsigned
1895 __mf_report_leaks (__mf_object_tree_t *node)
1897   /* The counter persists across recursive calls, so
1898      that cumulative numbers are printed below. */
1899 static unsigned count = 0;
1901 if (node == NULL) /* Reset */
1903 count = 0;
1904 return 0;
1907 /* Inorder traversal. */
1908 if (node->left)
1909 __mf_report_leaks (node->left);
1910 if (node->data.type == __MF_TYPE_HEAP
1911 || node->data.type == __MF_TYPE_HEAP_I)
1913 count ++;
1914 fprintf (stderr, "Leaked object %u:\n", count);
1915 __mf_describe_object (& node->data);
1917 if (node->right)
1918 __mf_report_leaks (node->right);
1920 return count;
1923 /* ------------------------------------------------------------------------ */
1924 /* __mf_report */
1926 void
1927 __mf_report ()
1929 LOCKTH ();
1930 BEGIN_RECURSION_PROTECT ();
1931 __mfu_report ();
1932 END_RECURSION_PROTECT ();
1933 UNLOCKTH ();
1936 void
1937 __mfu_report ()
1939 if (__mf_opts.collect_stats)
1941 fprintf (stderr,
1942 "*******\n"
1943 "mudflap stats:\n"
1944 "calls to __mf_check: %lu rot: %lu/%lu\n"
1945 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1946 " __mf_unregister: %lu [%luB]\n"
1947 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1948 __mf_count_check, __mf_treerot_left, __mf_treerot_right,
1949 __mf_count_register,
1950 __mf_total_register_size[0], __mf_total_register_size[1],
1951 __mf_total_register_size[2], __mf_total_register_size[3],
1952 __mf_total_register_size[4], /* XXX */
1953 __mf_count_unregister, __mf_total_unregister_size,
1954 __mf_count_violation[0], __mf_count_violation[1],
1955 __mf_count_violation[2], __mf_count_violation[3],
1956 __mf_count_violation[4]);
1958 fprintf (stderr,
1959 "calls with reentrancy: %lu\n", __mf_reentrancy);
1960 #ifdef LIBMUDFLAPTH
1961 fprintf (stderr,
1962 " lock contention: %lu\n", __mf_lock_contention);
1963 #endif
1965 /* Lookup cache stats. */
1967 unsigned i;
1968 unsigned max_reuse = 0;
1969 unsigned num_used = 0;
1970 unsigned num_unused = 0;
1972 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1974 if (__mf_lookup_cache_reusecount[i])
1975 num_used ++;
1976 else
1977 num_unused ++;
1978 if (max_reuse < __mf_lookup_cache_reusecount[i])
1979 max_reuse = __mf_lookup_cache_reusecount[i];
1981 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1982 num_used, num_unused, max_reuse);
1986 unsigned live_count;
1987 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1988 fprintf (stderr, "number of live objects: %u\n", live_count);
1991 if (__mf_opts.persistent_count > 0)
1993 unsigned dead_count = 0;
1994 unsigned row, plot;
1995 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1996 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1997 if (__mf_object_cemetary [row][plot] != 0)
1998 dead_count ++;
1999 fprintf (stderr, " zombie objects: %u\n", dead_count);
2002 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
2004 unsigned l;
2005 extern void * __mf_wrap_alloca_indirect (size_t c);
2007 /* Free up any remaining alloca()'d blocks. */
2008 __mf_wrap_alloca_indirect (0);
2009 __mf_describe_object (NULL); /* Reset description epoch. */
2010 __mf_report_leaks (NULL); /* Reset cumulative count. */
2011 l = __mf_report_leaks (__mf_object_root);
2012 fprintf (stderr, "number of leaked objects: %u\n", l);
2016 /* __mf_backtrace */
2018 size_t
2019 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
2021 void ** pc_array;
2022 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
2023 unsigned remaining_size;
2024 unsigned omitted_size = 0;
2025 unsigned i;
2026 DECLARE (void, free, void *ptr);
2027 DECLARE (void *, calloc, size_t c, size_t n);
2028 DECLARE (void *, malloc, size_t n);
2030 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
2031 #ifdef HAVE_BACKTRACE
2032 pc_array_size = backtrace (pc_array, pc_array_size);
2033 #else
2034 #define FETCH(n) do { if (pc_array_size >= n) { \
2035 pc_array[n] = __builtin_return_address(n); \
2036 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
2038   /* Unroll some calls to __builtin_return_address because that function
2039      only accepts a literal integer parameter. */
2040 FETCH (0);
2041 #if 0
2042 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
2043 rather than simply returning 0. :-( */
2044 FETCH (1);
2045 FETCH (2);
2046 FETCH (3);
2047 FETCH (4);
2048 FETCH (5);
2049 FETCH (6);
2050 FETCH (7);
2051 FETCH (8);
2052 if (pc_array_size > 8) pc_array_size = 9;
2053 #else
2054 if (pc_array_size > 0) pc_array_size = 1;
2055 #endif
2057 #undef FETCH
2058 #endif
2060 /* We want to trim the first few levels of the stack traceback,
2061 since they contain libmudflap wrappers and junk. If pc_array[]
2062 ends up containing a non-NULL guess_pc, then trim everything
2063 before that. Otherwise, omit the first guess_omit_levels
2064 entries. */
2066 if (guess_pc != NULL)
2067 for (i=0; i<pc_array_size; i++)
2068 if (pc_array [i] == guess_pc)
2069 omitted_size = i;
2071 if (omitted_size == 0) /* No match? */
2072 if (pc_array_size > guess_omit_levels)
2073 omitted_size = guess_omit_levels;
2075 remaining_size = pc_array_size - omitted_size;
2077 #ifdef HAVE_BACKTRACE_SYMBOLS
2078 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
2079 #else
2081 /* Let's construct a buffer by hand. It will have <remaining_size>
2082 char*'s at the front, pointing at individual strings immediately
2083 afterwards. */
2084 void *buffer;
2085 char *chars;
2086 char **pointers;
2087 enum { perline = 30 };
2088 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
2089 pointers = (char **) buffer;
2090 chars = (char *)buffer + (remaining_size * sizeof (char *));
2091 for (i = 0; i < remaining_size; i++)
2093 pointers[i] = chars;
2094 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
2095 chars = chars + perline;
2097 *symbols = pointers;
2099 #endif
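/* The hand-built fallback mimics the backtrace_symbols() contract: a
   single malloc'd block laid out as

     [ char *sym[0] ... char *sym[N-1] | N fixed 30-byte string slots ]

   so the caller releases the pointer array and the strings with one
   free. */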
2100 CALL_REAL (free, pc_array);
2102 return remaining_size;
2105 /* ------------------------------------------------------------------------ */
2106 /* __mf_violation */
2108 void
2109 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
2110 const char *location, int type)
2111 {
2112 char buf [128];
2113 static unsigned violation_number;
2114 DECLARE(void, free, void *ptr);
2116 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
2117 (void *) pc,
2118 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
2120 if (__mf_opts.collect_stats)
2121 __mf_count_violation [(type < 0) ? 0 :
2122 (type > __MF_VIOL_WATCH) ? 0 :
2123 type] ++;
2125 /* Print out a basic warning message. */
2126 if (__mf_opts.verbose_violations)
2127 {
2128 unsigned dead_p;
2129 unsigned num_helpful = 0;
2130 struct timeval now = { 0, 0 }; /* Zero-initialized in case gettimeofday is unavailable below. */
2131 #if HAVE_GETTIMEOFDAY
2132 gettimeofday (& now, NULL);
2133 #endif
2135 violation_number ++;
2136 fprintf (stderr,
2137 "*******\n"
2138 "mudflap violation %u (%s): time=%lu.%06lu "
2139 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2140 violation_number,
2141 ((type == __MF_VIOL_READ) ? "check/read" :
2142 (type == __MF_VIOL_WRITE) ? "check/write" :
2143 (type == __MF_VIOL_REGISTER) ? "register" :
2144 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2145 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2146 now.tv_sec, now.tv_usec,
2147 (void *) ptr, (unsigned long)sz, (void *) pc,
2148 (location != NULL ? " location=`" : ""),
2149 (location != NULL ? location : ""),
2150 (location != NULL ? "'" : ""));
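/* Editorial example of the message this produces (all values here are
   illustrative, not taken from a real run):

   *******
   mudflap violation 3 (check/read): time=1049824033.102085 ptr=0x080c2358 size=4
   pc=0x08063299 location=`nbench1.c:679 (SetCompBit)'
*/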
2152 if (__mf_opts.backtrace > 0)
2153 {
2154 char ** symbols;
2155 unsigned i, num;
2157 num = __mf_backtrace (& symbols, (void *) pc, 2);
2158 /* Note: backtrace_symbols calls malloc(). But since we're inside
2159 __mf_violation (and presumably arrived via __mf_check), the
2160 recursion guard keeps the new string out of the object database. */
2162 for (i=0; i<num; i++)
2163 fprintf (stderr, " %s\n", symbols[i]);
2165 /* Calling free() here would trigger a violation. */
2166 CALL_REAL(free, symbols);
2167 }
2170 /* Look for nearby objects. For this, we start with s_low/s_high
2171 pointing to the given area, looking for overlapping objects.
2172 If none show up, widen the search area and keep looking. */
2174 if (sz == 0) sz = 1;
2176 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2177 {
2178 enum {max_objs = 3}; /* magic */
2179 __mf_object_tree_t *objs[max_objs];
2180 unsigned num_objs = 0;
2181 uintptr_t s_low, s_high;
2182 unsigned tries = 0;
2183 unsigned i;
2185 s_low = (uintptr_t) ptr;
2186 s_high = CLAMPSZ (ptr, sz);
2188 while (tries < 16) /* magic */
2189 {
2190 if (dead_p)
2191 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2192 else
2193 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2195 if (num_objs) /* good enough */
2196 break;
2198 tries ++;
2200 /* XXX: tune this search strategy. It's too dependent on sz,
2201 which can vary from 1 to very large numbers (e.g. when
2202 checking array indexes). */
2203 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2204 s_high = CLAMPADD (s_high, (sz * tries * tries));
2205 }
2207 for (i = 0; i < min (num_objs, max_objs); i++)
2208 {
2209 __mf_object_t *obj = & objs[i]->data;
2210 uintptr_t low = (uintptr_t) ptr;
2211 uintptr_t high = CLAMPSZ (ptr, sz);
2212 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2213 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2214 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2215 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2216 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2217 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2219 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2220 num_helpful + i + 1,
2221 (before1 ? before1 : after1 ? after1 : into1),
2222 (before1 ? "before" : after1 ? "after" : "into"),
2223 (before2 ? before2 : after2 ? after2 : into2),
2224 (before2 ? "before" : after2 ? "after" : "into"));
2225 __mf_describe_object (obj);
2226 }
2227 num_helpful += num_objs;
2228 }
2230 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2231 }
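/* Editorial note: the search window above widens quadratically. For
   example, with sz == 8 the bounds move outward by 8*1*1 = 8, then
   8*2*2 = 32, then 8*3*3 = 72 bytes per side on successive tries, up to
   the 16-try limit, once for live and once for dead (cemetery) objects. */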
2233 /* How to finally handle this violation? */
2234 switch (__mf_opts.violation_mode)
2235 {
2236 case viol_nop:
2237 break;
2238 case viol_segv:
2239 kill (getpid(), SIGSEGV);
2240 break;
2241 case viol_abort:
2242 abort ();
2243 break;
2244 case viol_gdb:
2245 snprintf (buf, 128, "gdb --pid=%d", getpid ());
2246 system (buf);
2247 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2248 instead, and let the forked child execlp() gdb. That way, this
2249 subject process can be resumed under the supervision of gdb.
2250 This can't happen now, since system() only returns when gdb
2251 dies. In that case, we need to beware of starting a second
2252 concurrent gdb child upon the next violation. (But if the first
2253 gdb dies, then starting a new one is appropriate.) */
2254 break;
2255 }
2256 }
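/* Editorial sketch of the fork()/execlp() alternative proposed in the
   XXX comment above. GDB_WAIT_PARAMETER is a hypothetical tunable, and
   this is illustrative only, not the current implementation. */
#if 0
static void
example_spawn_gdb (void)
{
  char arg[32];
  pid_t child;
  snprintf (arg, sizeof arg, "--pid=%d", (int) getpid ());
  child = fork ();
  if (child == 0)
    {
      execlp ("gdb", "gdb", arg, (char *) NULL); /* child: attach gdb to the parent */
      _exit (127); /* exec failed */
    }
  else if (child > 0)
    sleep (GDB_WAIT_PARAMETER); /* give gdb time to attach, then resume */
}
#endif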
2258 /* ------------------------------------------------------------------------ */
2261 unsigned __mf_watch (void *ptr, size_t sz)
2262 {
2263 unsigned rc;
2264 LOCKTH ();
2265 BEGIN_RECURSION_PROTECT ();
2266 rc = __mf_watch_or_not (ptr, sz, 1);
2267 END_RECURSION_PROTECT ();
2268 UNLOCKTH ();
2269 return rc;
2270 }
2272 unsigned __mf_unwatch (void *ptr, size_t sz)
2273 {
2274 unsigned rc;
2275 LOCKTH ();
BEGIN_RECURSION_PROTECT (); /* Editorial fix: mirrors __mf_watch; the original omitted the recursion guard here. */
2276 rc = __mf_watch_or_not (ptr, sz, 0);
END_RECURSION_PROTECT ();
2277 UNLOCKTH ();
2278 return rc;
2279 }
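/* Editorial sketch: the client's view of the watch API. Once an object
   is watched, __mf_uncache_object (below) forces subsequent instrumented
   accesses through __mf_check, which reports them as watch violations. */
#if 0
static void
example_watch_usage (void)
{
  char *p = malloc (16);
  __mf_watch (p, 16);    /* returns the number of objects newly watched */
  p[0] = 'x';            /* an instrumented access now raises __MF_VIOL_WATCH */
  __mf_unwatch (p, 16);  /* clear the watchpoint again */
  free (p);
}
#endif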
2282 static unsigned
2283 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2284 {
2285 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2286 uintptr_t ptr_low = (uintptr_t) ptr;
2287 unsigned count = 0;
2289 TRACE ("%s ptr=%p size=%lu\n",
2290 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2292 switch (__mf_opts.mudflap_mode)
2293 {
2294 case mode_nop:
2295 case mode_populate:
2296 case mode_violate:
2297 count = 0;
2298 break;
2300 case mode_check:
2301 {
2302 __mf_object_tree_t **all_ovr_objs;
2303 unsigned obj_count;
2304 unsigned n;
2305 DECLARE (void *, malloc, size_t c);
2306 DECLARE (void, free, void *p);
2308 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2309 VERBOSE_TRACE (" %u:", obj_count);
2311 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
2312 obj_count));
2313 if (all_ovr_objs == NULL) abort ();
2314 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2315 assert (n == obj_count);
2317 for (n = 0; n < obj_count; n ++)
2318 {
2319 __mf_object_t *obj = & (all_ovr_objs[n]->data);
2321 VERBOSE_TRACE (" [%p]", (void *) obj);
2322 if (obj->watching_p != flag)
2323 {
2324 obj->watching_p = flag;
2325 count ++;
2327 /* Remove object from cache, to ensure next access
2328 goes through __mf_check(). */
2329 if (flag)
2330 __mf_uncache_object (obj);
2331 }
2332 }
2333 CALL_REAL (free, all_ovr_objs);
2334 }
2335 break;
2336 }
2338 return count;
2339 }
2342 void
2343 __mf_sigusr1_handler (int num)
2344 {
2345 __mf_sigusr1_received ++;
2346 }
2348 /* Install or remove SIGUSR1 handler as necessary.
2349 Also, respond to a received pending SIGUSR1. */
2350 void
2351 __mf_sigusr1_respond ()
2352 {
2353 static int handler_installed;
2355 #if HAVE_SIGNAL
2356 /* Manage handler */
2357 if (__mf_opts.sigusr1_report && ! handler_installed)
2358 {
2359 signal (SIGUSR1, __mf_sigusr1_handler);
2360 handler_installed = 1;
2361 }
2362 else if (! __mf_opts.sigusr1_report && handler_installed)
2363 {
2364 signal (SIGUSR1, SIG_DFL);
2365 handler_installed = 0;
2366 }
2367 #endif
2369 /* Manage enqueued signals */
2370 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2371 {
2372 __mf_sigusr1_handled ++;
2373 assert (__mf_state == reentrant);
2374 __mfu_report ();
2375 handler_installed = 0; /* We may need to re-install the handler; this might be a SysV signal library. */
2376 }
2377 }
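/* Editorial note: with the sigusr1_report option enabled in
   MUDFLAP_OPTIONS (the runtime flag is believed to be -sigusr1-report),
   sending SIGUSR1 to the process (`kill -USR1 <pid>`) makes the next
   libmudflap entry point call this function and emit a full report
   via __mfu_report. */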
2380 /* XXX: provide an alternative __assert_fail function that cannot
2381 fail due to libmudflap infinite recursion. */
2382 #ifndef NDEBUG
2384 static void
2385 write_itoa (int fd, unsigned n)
2386 {
2387 enum x { bufsize = sizeof(n)*4 };
2388 char buf [bufsize];
2389 unsigned i;
2391 for (i=0; i<bufsize-1; i++)
2392 {
2393 unsigned digit = n % 10;
2394 buf[bufsize-2-i] = digit + '0';
2395 n /= 10;
2396 if (n == 0)
2397 {
2398 char *m = & buf [bufsize-2-i];
2399 buf[bufsize-1] = '\0';
2400 write (fd, m, strlen(m));
2401 break;
2402 }
2403 }
2404 }
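/* Editorial note: write_itoa (2, 1234) emits the text "1234" on stderr
   using only write(), deliberately avoiding stdio (and hence possible
   malloc calls), so the assertion path below cannot recurse back into
   libmudflap. */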
2407 void
2408 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2409 {
2410 #define write2(string) write (2, (string), strlen ((string)));
2411 write2("mf");
2412 #ifdef LIBMUDFLAPTH
2413 write2("(");
2414 write_itoa (2, (unsigned) pthread_self ());
2415 write2(")");
2416 #endif
2417 write2(": assertion failure: `");
2418 write (2, msg, strlen (msg));
2419 write2("' in ");
2420 write (2, func, strlen (func));
2421 write2(" at ");
2422 write (2, file, strlen (file));
2423 write2(":");
2424 write_itoa (2, line);
2425 write2("\n");
2426 #undef write2
2427 abort ();
2428 }
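/* Editorial example: a failure prints a line along the lines of
   mf: assertion failure: `obj != NULL' in __mf_unregister at mf-runtime.c:720
   and then aborts. (The identifier, function, and line number here are
   illustrative.) */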
2431 #endif