/*--------------------------------------------------------------------*/
/*--- Management, printing, etc, of errors and suppressions.      ---*/
/*---                                                  mc_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_poolalloc.h"     // For mc_include.h
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)
#include "pub_tool_xarray.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "mc_include.h"
/*------------------------------------------------------------*/
/*--- Error types                                          ---*/
/*------------------------------------------------------------*/

/* See comment in mc_include.h */
Bool MC_(any_value_errors) = False;
/* ------------------ Errors ----------------------- */

/* What kind of error it is. */

typedef struct _MC_Error MC_Error;

struct _MC_Error {
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.

   union {
      // Use of an undefined value:
      // - as a pointer in a load or store
      struct {
         SizeT szB;   // size of value in bytes
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Value;

      // Use of an undefined value in a conditional branch or move.
      struct {
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Cond;

      // Addressability error in core (signal-handling) operation.
      // It would be good to get rid of this error kind, merge it with
      // another one somehow.
      struct {
      } CoreMem;

      // Use of an unaddressable memory location in a load or store.
      struct {
         Bool     isWrite;    // read or write?
         SizeT    szB;        // not used for exec (jump) errors
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
         AddrInfo ai;
      } Addr;

      // Jump to an unaddressable memory location.
      struct {
         AddrInfo ai;
      } Jump;

      // System call register input contains undefined bytes.
      struct {
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } RegParam;

      // System call memory input contains undefined/unaddressable bytes
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } MemParam;

      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } User;

      // Program tried to free() something that's not a heap block (this
      // covers double-frees).
      struct {
         AddrInfo ai;
      } Free;

      // Program allocates heap block with one function
      // (malloc/new/new[]/custom) and deallocates with not the matching one.
      struct {
         AddrInfo ai;
      } FreeMismatch;

      // Call to strcpy, memcpy, etc, with overlapping blocks.
      struct {
         Addr  src;   // Source block
         Addr  dst;   // Destination block
         SizeT szB;   // Size in bytes;  0 if unused.
      } Overlap;

      // A memory leak.
      struct {
         UInt        n_this_record;
         UInt        n_total_records;
         LossRecord* lr;
      } Leak;

      // A memory pool error.
      struct {
         AddrInfo ai;
      } IllegalMempool;

      // A fishy function argument value
      // An argument value is considered fishy if the corresponding
      // parameter has SizeT type and the value when interpreted as a
      // signed number is negative.
      struct {
         const HChar *function_name;
         const HChar *argument_name;
         SizeT value;
      } FishyValue;
   } Err;
};
/*------------------------------------------------------------*/
/*--- Printing errors                                      ---*/
/*------------------------------------------------------------*/

/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  Which, in
   Memcheck, we don't use.  Hence a no-op.
*/
void MC_(before_pp_Error) ( const Error* err ) {
}
/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}

static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
static const HChar* str_leak_lossmode ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "definitely lost"; break;
      case IndirectLeak: loss = "indirectly lost"; break;
      case Possible:     loss = "possibly lost"; break;
      case Reachable:    loss = "still reachable"; break;
   }
   return loss;
}
static const HChar* xml_leak_kind ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "Leak_DefinitelyLost"; break;
      case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
      case Possible:     loss = "Leak_PossiblyLost"; break;
      case Reachable:    loss = "Leak_StillReachable"; break;
   }
   return loss;
}
const HChar* MC_(parse_leak_kinds_tokens) =
   "reachable,possible,indirect,definite";

UInt MC_(all_Reachedness)(void)
{
   static UInt all;

   if (all == 0) {
      // Compute a set with all values by doing a parsing of the "all" keyword.
      Bool parseok = VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
                                         True,/*allow_all*/
                                         "all",
                                         &all);
      tl_assert (parseok && all);
   }

   return all;
}
static const HChar* pp_Reachedness_for_leak_kinds(Reachedness r)
{
   switch(r) {
      case Reachable:    return "reachable";
      case Possible:     return "possible";
      case IndirectLeak: return "indirect";
      case Unreached:    return "definite";
      default: tl_assert(0);
   }
}
static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
   const HChar* src = NULL;

   switch (okind) {
      case MC_OKIND_STACK:   src = " by a stack allocation"; break;
      case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
      case MC_OKIND_USER:    src = " by a client request"; break;
      case MC_OKIND_UNKNOWN: src = ""; break;
   }
   tl_assert(src); /* guards against invalid 'okind' */

   if (VG_(clo_xml)) {
      emit( "  <auxwhat>Uninitialised value was created%s</auxwhat>\n",
            src);
      VG_(pp_ExeContext)( ec );
   } else {
      emit( " Uninitialised value was created%s\n", src);
      VG_(pp_ExeContext)( ec );
   }
}
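
/* Illustrative note (not from the original source): for an uninitialised
   value traced back to a heap allocation, the non-XML branch above prints
      Uninitialised value was created by a heap allocation
   followed by the stack trace of 'ec'; the XML branch wraps the same text
   in an <auxwhat> element. */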
HChar * MC_(snprintf_delta) (HChar * buf, Int size,
                             SizeT current_val, SizeT old_val,
                             LeakCheckDeltaMode delta_mode)
{
   // Make sure the buffer size is large enough. With old_val == 0 and
   // current_val == ULLONG_MAX the delta including inserted commas is:
   // 18,446,744,073,709,551,615
   // whose length is 26. Therefore:
   tl_assert(size >= 26 + 4 + 1);

   if (delta_mode == LCD_Any)
      buf[0] = '\0';
   else if (current_val >= old_val)
      VG_(snprintf) (buf, size, " (+%'lu)", current_val - old_val);
   else
      VG_(snprintf) (buf, size, " (-%'lu)", old_val - current_val);

   return buf;
}
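
/* Worked example (sketch, not part of the original source): with
   current_val == 1500, old_val == 10 and a delta mode other than LCD_Any,
   the buffer ends up holding " (+1,490)" (the %'lu format inserts thousands
   separators); with LCD_Any the buffer is left empty, so callers can append
   it to a message unconditionally. */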
static void pp_LossRecord(UInt n_this_record, UInt n_total_records,
                          LossRecord* lr, Bool xml)
{
   // char arrays to produce the indication of increase/decrease in case
   // of delta_mode != LCD_Any
   HChar d_bytes[31];
   HChar d_direct_bytes[31];
   HChar d_indirect_bytes[31];
   HChar d_num_blocks[31];

   MC_(snprintf_delta) (d_bytes, sizeof(d_bytes),
                        lr->szB + lr->indirect_szB,
                        lr->old_szB + lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_direct_bytes, sizeof(d_direct_bytes),
                        lr->szB,
                        lr->old_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_indirect_bytes, sizeof(d_indirect_bytes),
                        lr->indirect_szB,
                        lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_num_blocks, sizeof(d_num_blocks),
                        (SizeT) lr->num_blocks,
                        (SizeT) lr->old_num_blocks,
                        MC_(detect_memory_leaks_last_delta_mode));

   if (xml) {
      emit("  <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
      if (lr->indirect_szB > 0) {
         emit( "  <xwhat>\n" );
         emit( "    <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
               "in %'u%s blocks"
               " are %s in loss record %'u of %'u</text>\n",
               lr->szB + lr->indirect_szB, d_bytes,
               lr->szB, d_direct_bytes,
               lr->indirect_szB, d_indirect_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               n_this_record, n_total_records );
         // Nb: don't put commas in these XML numbers
         emit( "    <leakedbytes>%lu</leakedbytes>\n",
               lr->szB + lr->indirect_szB );
         emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
         emit( "  </xwhat>\n" );
      } else {
         emit( "  <xwhat>\n" );
         emit( "    <text>%'lu%s bytes in %'u%s blocks"
               " are %s in loss record %'u of %'u</text>\n",
               lr->szB, d_direct_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               n_this_record, n_total_records );
         emit( "    <leakedbytes>%lu</leakedbytes>\n", lr->szB );
         emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
         emit( "  </xwhat>\n" );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   } else { /* ! if (xml) */
      if (lr->indirect_szB > 0) {
         emit(
            "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
            " are %s in loss record %'u of %'u\n",
            lr->szB + lr->indirect_szB, d_bytes,
            lr->szB, d_direct_bytes,
            lr->indirect_szB, d_indirect_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            n_this_record, n_total_records
         );
      } else {
         emit(
            "%'lu%s bytes in %'u%s blocks are %s in loss record %'u of %'u\n",
            lr->szB, d_direct_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            n_this_record, n_total_records
         );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   }
}
void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
                        LossRecord* l)
{
   pp_LossRecord (n_this_record, n_total_records, l, /* xml */ False);
}
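
/* Example of the non-XML text built by pp_LossRecord (sketch with made-up
   numbers): a record with 400 direct and 600 indirect leaked bytes spread
   over 3 blocks would be reported as
      1,000 (400 direct, 600 indirect) bytes in 3 blocks are definitely lost
      in loss record 5 of 12
   with the " (+N)" / " (-N)" delta strings appended after each number when
   a delta leak search is active. */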
void MC_(pp_Error) ( const Error* err )
{
   const Bool xml  = VG_(clo_xml); /* a shorthand */
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case Err_CoreMem:
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
            signal handler frame.  --njn */
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>CoreMemError</kind>\n" );
            emit( "  <what>%pS contains unaddressable byte(s)</what>\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "%s contains unaddressable byte(s)\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;
      case Err_Value:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitValue</kind>\n" );
            emit( "  <what>Use of uninitialised value of size %lu</what>\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                             extra->Err.Value.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Use of uninitialised value of size %lu\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                             extra->Err.Value.otag & 3 );
         }
         break;
      case Err_Cond:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitCondition</kind>\n" );
            emit( "  <what>Conditional jump or move depends"
                  " on uninitialised value(s)</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Conditional jump or move depends"
                  " on uninitialised value(s)\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         }
         break;
      case Err_RegParam:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emit( "  <what>Syscall param %pS contains "
                  "uninitialised byte(s)</what>\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         } else {
            emit( "Syscall param %s contains uninitialised byte(s)\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         }
         break;
      case Err_MemParam:
         if (!extra->Err.MemParam.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emit( "  <what>Syscall param %pS points to %s byte(s)</what>\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         } else {
            emit( "Syscall param %s points to %s byte(s)\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         }
         break;
      case Err_User:
         if (!extra->Err.User.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>ClientCheck</kind>\n" );
            emit( "  <what>%s byte(s) found "
                  "during client check request</what>\n",
                  extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
                                False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         } else {
            emit( "%s byte(s) found during client check request\n",
                  extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
                                False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         }
         break;
      case Err_Free:
         if (xml) {
            emit( "  <kind>InvalidFree</kind>\n" );
            emit( "  <what>Invalid free() / delete / delete[]"
                  " / realloc()</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Free.ai, False );
         } else {
            emit( "Invalid free() / delete / delete[] / realloc()\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Free.ai, False );
         }
         break;
      case Err_FreeMismatch:
         if (xml) {
            emit( "  <kind>MismatchedFree</kind>\n" );
            emit( "  <what>Mismatched free() / delete / delete []</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.FreeMismatch.ai, False);
         } else {
            emit( "Mismatched free() / delete / delete []\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.FreeMismatch.ai, False);
         }
         break;
      case Err_Addr:
         if (xml) {
            emit( "  <kind>Invalid%s</kind>\n",
                  extra->Err.Addr.isWrite ? "Write" : "Read" );
            emit( "  <what>Invalid %s of size %lu</what>\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Addr.ai,
                                 extra->Err.Addr.maybe_gcc );
         } else {
            emit( "Invalid %s of size %lu\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Addr.ai,
                                 extra->Err.Addr.maybe_gcc );
         }
         break;
      case Err_Jump:
         if (xml) {
            emit( "  <kind>InvalidJump</kind>\n" );
            emit( "  <what>Jump to the invalid address stated "
                  "on the next line</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                                 False );
         } else {
            emit( "Jump to the invalid address stated on the next line\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                                 False );
         }
         break;
      case Err_Overlap:
         if (xml) {
            emit( "  <kind>Overlap</kind>\n" );
            if (extra->Err.Overlap.szB == 0) {
               emit( "  <what>Source and destination overlap "
                     "in %pS(%#lx, %#lx)\n</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "  <what>Source and destination overlap "
                     "in %pS(%#lx, %#lx, %lu)</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            if (extra->Err.Overlap.szB == 0) {
               emit( "Source and destination overlap in %s(%#lx, %#lx)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "Source and destination overlap in %s(%#lx, %#lx, %lu)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;
      case Err_IllegalMempool:
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>InvalidMemPool</kind>\n" );
            emit( "  <what>Illegal memory pool address</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.IllegalMempool.ai, False );
         } else {
            emit( "Illegal memory pool address\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.IllegalMempool.ai, False );
         }
         break;
      case Err_Leak: {
         UInt        n_this_record   = extra->Err.Leak.n_this_record;
         UInt        n_total_records = extra->Err.Leak.n_total_records;
         LossRecord* lr              = extra->Err.Leak.lr;
         pp_LossRecord (n_this_record, n_total_records, lr, xml);
         break;
      }
      case Err_FishyValue:
         if (xml) {
            emit( "  <kind>FishyValue</kind>\n" );
            emit( "Argument '%s' of function %s has a fishy "
                  "(possibly negative) value: %ld\n",
                  extra->Err.FishyValue.argument_name,
                  extra->Err.FishyValue.function_name,
                  (SSizeT)extra->Err.FishyValue.value);
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "Argument '%s' of function %s has a fishy "
                  "(possibly negative) value: %ld\n",
                  extra->Err.FishyValue.argument_name,
                  extra->Err.FishyValue.function_name,
                  (SSizeT)extra->Err.FishyValue.value);
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;
      default:
         VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in mc_pp_Error)");
   }
}
/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* These many bytes below %ESP are considered addressable if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
static Bool is_just_below_ESP( Addr esp, Addr aa )
{
   esp -= VG_STACK_REDZONE_SZB;
   if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}
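
/* Quick numeric sketch of the test above (hypothetical values, assuming
   VG_STACK_REDZONE_SZB == 0): with esp == 0x8000 and aa == 0x7F00 the
   access is 256 bytes below the stack pointer, which is within
   VG_GCC296_BUG_STACK_SLOP (1024), so is_just_below_ESP returns True and
   the access can be ignored under --workaround-gcc296-bugs=yes. */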
/* --- Called from generated and non-generated code --- */

void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (MC_(in_ignored_range)(a))
      return;

   if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
      return;

   Addr current_sp = VG_(get_SP)(tid);
   just_below_esp = is_just_below_ESP( current_sp, a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   /* Also, if this is caused by an access in the range of offsets
      below the stack pointer as described by
      --ignore-range-below-sp, ignore it. */
   if (MC_(in_ignored_range_below_sp)( current_sp, a, szB ))
      return;

   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}
void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Value.szB       = szB;
   extra.Err.Value.otag      = otag;
   extra.Err.Value.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
}
void MC_(record_cond_error) ( ThreadId tid, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Cond.otag      = otag;
   extra.Err.Cond.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
}
/* --- Called from non-generated code --- */

/* This is for memory errors in signal-related memory. */
void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}
void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.RegParam.otag      = otag;
   extra.Err.RegParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
}
void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, const HChar* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (!isAddrErr)
      tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag != 0) {
      tl_assert( MC_(clo_mc_level) == 3 );
      tl_assert( !isAddrErr );
   }
   extra.Err.MemParam.isAddrErr = isAddrErr;
   extra.Err.MemParam.ai.tag    = Addr_Undescribed;
   extra.Err.MemParam.otag      = otag;
   extra.Err.MemParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
}
void MC_(record_jump_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Jump.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
}
void MC_(record_free_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Free.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
}
void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.FreeMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.allocated_at = MC_(allocated_at) (mc);
   VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
   ai->Addr.Block.freed_at = MC_(freed_at) (mc);
   VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
                            &extra );
}
void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
}
void MC_(record_overlap_error) ( ThreadId tid, const HChar* function,
                                 Addr src, Addr dst, SizeT szB )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Overlap.src = src;
   extra.Err.Overlap.dst = dst;
   extra.Err.Overlap.szB = szB;
   VG_(maybe_record_error)(
      tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
}
Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lr,
                              Bool print_record, Bool count_error )
{
   MC_Error extra;
   extra.Err.Leak.n_this_record   = n_this_record;
   extra.Err.Leak.n_total_records = n_total_records;
   extra.Err.Leak.lr              = lr;
   return
   VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
                       lr->key.allocated_at, print_record,
                       /*allow_GDB_attach*/False, count_error );
}
Bool MC_(record_fishy_value_error) ( ThreadId tid, const HChar *function_name,
                                     const HChar *argument_name, SizeT value)
{
   MC_Error extra;

   tl_assert(VG_INVALID_THREADID != tid);

   if ((SSizeT)value >= 0) return False;  // not a fishy value

   extra.Err.FishyValue.function_name = function_name;
   extra.Err.FishyValue.argument_name = argument_name;
   extra.Err.FishyValue.value = value;

   VG_(maybe_record_error)(
      tid, Err_FishyValue, /*addr*/0, /*s*/NULL, &extra );

   return True;
}
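
/* Hypothetical call site (sketch, not from this file): a wrapper that
   wants to reject a size argument which has wrapped around to a huge
   unsigned value could do
      if (MC_(record_fishy_value_error)(tid, "malloc", "size", size))
         return NULL;
   since the function only reports (and returns True) when the value is
   negative once interpreted as SSizeT. */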
void MC_(record_user_error) ( ThreadId tid, Addr a,
                              Bool isAddrErr, UInt otag )
{
   MC_Error extra;
   if (otag != 0) {
      tl_assert(!isAddrErr);
      tl_assert( MC_(clo_mc_level) == 3 );
   }
   if (!isAddrErr) {
      tl_assert( MC_(clo_mc_level) >= 2 );
   }
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.User.isAddrErr = isAddrErr;
   extra.Err.User.ai.tag    = Addr_Undescribed;
   extra.Err.User.otag      = otag;
   extra.Err.User.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}
Bool MC_(is_mempool_block)(MC_Chunk* mc_search)
{
   MC_Mempool* mp;

   if (!MC_(mempool_list))
      return False;

   // A chunk can only come from a mempool if a custom allocator
   // is used. No search required for other kinds.
   if (mc_search->allockind == MC_AllocCustom) {
      VG_(HT_ResetIter)( MC_(mempool_list) );
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (mc == mc_search)
               return True;
         }
      }
   }

   return False;
}
/*------------------------------------------------------------*/
/*--- Other error operations                               ---*/
/*------------------------------------------------------------*/

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different.  */
Bool MC_(eq_Error) ( VgRes res, const Error* e1, const Error* e2 )
{
   MC_Error* extra1 = VG_(get_error_extra)(e1);
   MC_Error* extra2 = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case Err_CoreMem: {
         const HChar *e1s, *e2s;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;
         if (VG_STREQ(e1s, e2s)) return True;
         return False;
      }

      case Err_RegParam:
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case Err_MemParam:
         if (!VG_STREQ(VG_(get_error_string)(e1),
                       VG_(get_error_string)(e2))) return False;
         /* fall through */
      case Err_User:
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
                ? True : False );

      case Err_Free:
      case Err_FreeMismatch:
      case Err_Jump:
      case Err_IllegalMempool:
      case Err_Overlap:
      case Err_Cond:
         return True;

      case Err_FishyValue:
         return VG_STREQ(extra1->Err.FishyValue.function_name,
                         extra2->Err.FishyValue.function_name) &&
                VG_STREQ(extra1->Err.FishyValue.argument_name,
                         extra2->Err.FishyValue.argument_name);

      case Err_Addr:
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
                ? True : False );

      case Err_Value:
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
                ? True : False );

      case Err_Leak:
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
/* Functions used when searching MC_Chunk lists */
static
Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 MC_(Malloc_Redzone_SzB) );
}
static
Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 rzB );
}
// Forward declarations
static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
                                          AddrInfo* ai );
/* Describe an address as best you can, for error messages,
   putting the result in ai. */
static void describe_addr ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
{
   MC_Chunk*  mc;

   tl_assert(Addr_Undescribed == ai->tag);

   /* -- Perhaps it's a user-named block? -- */
   if (client_block_maybe_describe( a, ai )) {
      return;
   }

   /* -- Perhaps it's in mempool block (non-meta)? -- */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ False, ai )) {
      return;
   }

   /* Blocks allocated by memcheck malloc functions are either
      on the recently freed list or on the malloc-ed list.
      Custom blocks can be on both : a recently freed block might
      have been just re-allocated.
      So, first search the malloc-ed block, as the most recent
      block is the probable cause of error.
      We however detect and report that this is a recently re-allocated
      block. */
   /* -- Search for a currently malloc'd block which might bracket it. -- */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (!MC_(is_mempool_block)(mc) &&
          addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         if (MC_(get_freed_block_bracketting)( a ))
            ai->Addr.Block.block_desc = "recently re-allocated block";
         else
            ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB  = mc->szB;
         ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
         ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
         VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
         ai->Addr.Block.freed_at = MC_(freed_at)(mc);
         return;
      }
   }

   /* -- Search for a recently freed block which might bracket it. -- */
   mc = MC_(get_freed_block_bracketting)( a );
   if (mc) {
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Freed;
      ai->Addr.Block.block_desc = "block";
      ai->Addr.Block.block_szB  = mc->szB;
      ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
      ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
      VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
      ai->Addr.Block.freed_at = MC_(freed_at)(mc);
      return;
   }

   /* -- Perhaps it's in a meta mempool block? -- */
   /* This test is done last, because metapool blocks overlap with blocks
      handed out to the application. That makes every heap address part of
      a metapool block, so the interesting cases are handled first.
      This final search is a last-ditch attempt. When found, it is probably
      an error in the custom allocator itself. */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ True, ai )) {
      return;
   }

   /* No block found. Search a non-heap block description. */
   VG_(describe_addr) (ep, a, ai);
}
void MC_(pp_describe_addr) ( DiEpoch ep, Addr a )
{
   AddrInfo ai;

   ai.tag = Addr_Undescribed;
   describe_addr (ep, a, &ai);
   VG_(pp_addrinfo_mc) (a, &ai, /* maybe_gcc */ False);
   VG_(clear_addrinfo) (&ai);
}
/* Fill in *origin_ec as specified by otag, or NULL it out if otag
   does not refer to a known origin. */
static void update_origin ( /*OUT*/ExeContext** origin_ec,
                            UInt otag )
{
   UInt ecu = otag & ~3;
   *origin_ec = NULL;
   if (VG_(is_plausible_ECU)(ecu)) {
      *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
   }
}
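
/* Note on the otag encoding assumed throughout this file: the low two bits
   of an origin tag hold the origin kind (the MC_OKIND_* value extracted
   with 'otag & 3' before calling mc_pp_origin), and the remaining bits hold
   the ECU that update_origin converts back into an ExeContext via
   VG_(get_ExeContext_from_ECU). */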
/* Updates the copy with address info if necessary (but not for all errors). */
UInt MC_(update_Error_extra)( const Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);
   DiEpoch   ep    = VG_(get_ExeContext_epoch)(VG_(get_error_where)(err));

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   case Err_Overlap:
   case Err_FishyValue:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)() so their 'extra' is not copied.  But
   // we make it consistent with the others.
   case Err_Leak:
      return sizeof(MC_Error);

   // For value errors, get the ExeContext corresponding to the
   // origin tag.  Note that it is a kludge to assume that
   // a length-1 trace indicates a stack origin.  FIXME.
   case Err_Value:
      update_origin( &extra->Err.Value.origin_ec,
                     extra->Err.Value.otag );
      return sizeof(MC_Error);

   case Err_Cond:
      update_origin( &extra->Err.Cond.origin_ec,
                     extra->Err.Cond.otag );
      return sizeof(MC_Error);

   case Err_RegParam:
      update_origin( &extra->Err.RegParam.origin_ec,
                     extra->Err.RegParam.otag );
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Addr.ai );
      return sizeof(MC_Error);

   case Err_MemParam:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.MemParam.ai );
      update_origin( &extra->Err.MemParam.origin_ec,
                     extra->Err.MemParam.otag );
      return sizeof(MC_Error);

   case Err_Jump:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Jump.ai );
      return sizeof(MC_Error);

   case Err_User:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.User.ai );
      update_origin( &extra->Err.User.origin_ec,
                     extra->Err.User.otag );
      return sizeof(MC_Error);

   case Err_Free:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Free.ai );
      return sizeof(MC_Error);

   case Err_IllegalMempool:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      (void)client_block_maybe_describe( VG_(get_error_address)(err),
                                         &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
static Bool client_block_maybe_describe( Addr a,
                                         /*OUT*/AddrInfo* ai )
{
   UWord      i;
   CGenBlock* cgbs = NULL;
   UWord      cgb_used = 0;

   MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
   if (cgbs == NULL)
      tl_assert(cgb_used == 0);

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_UserG;
         ai->Addr.Block.block_desc = cgbs[i].desc;
         ai->Addr.Block.block_szB  = cgbs[i].size;
         ai->Addr.Block.rwoffset   = (Word)(a) - (Word)(cgbs[i].start);
         ai->Addr.Block.allocated_at = cgbs[i].where;
         VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
         ai->Addr.Block.freed_at = VG_(null_ExeContext)();
         return True;
      }
   }
   return False;
}
static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
                                          /*OUT*/AddrInfo* ai )
{
   MC_Mempool* mp;
   tl_assert( MC_(mempool_list) );

   VG_(HT_ResetIter)( MC_(mempool_list) );
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      if (mp->chunks != NULL && mp->metapool == is_metapool) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
               ai->tag = Addr_Block;
               ai->Addr.Block.block_kind = Block_MempoolChunk;
               ai->Addr.Block.block_desc = "block";
               ai->Addr.Block.block_szB  = mc->szB;
               ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
               ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
               VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
               ai->Addr.Block.freed_at = MC_(freed_at)(mc);
               return True;
            }
         }
      }
   }

   return False;
}
/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp, Value32Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp, Addr32Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
      FishyValueSupp,// Fishy value suppression.
   }
   MC_SuppKind;
Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su )
{
   SuppKind skind;

   if      (VG_STREQ(name, "Param"))   skind = ParamSupp;
   else if (VG_STREQ(name, "User"))    skind = UserSupp;
   else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
   else if (VG_STREQ(name, "Addr1"))   skind = Addr1Supp;
   else if (VG_STREQ(name, "Addr2"))   skind = Addr2Supp;
   else if (VG_STREQ(name, "Addr4"))   skind = Addr4Supp;
   else if (VG_STREQ(name, "Addr8"))   skind = Addr8Supp;
   else if (VG_STREQ(name, "Addr16"))  skind = Addr16Supp;
   else if (VG_STREQ(name, "Addr32"))  skind = Addr32Supp;
   else if (VG_STREQ(name, "Jump"))    skind = JumpSupp;
   else if (VG_STREQ(name, "Free"))    skind = FreeSupp;
   else if (VG_STREQ(name, "Leak"))    skind = LeakSupp;
   else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
   else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
   else if (VG_STREQ(name, "Cond"))    skind = CondSupp;
   else if (VG_STREQ(name, "Value0"))  skind = CondSupp; /* backwards compat */
   else if (VG_STREQ(name, "Value1"))  skind = Value1Supp;
   else if (VG_STREQ(name, "Value2"))  skind = Value2Supp;
   else if (VG_STREQ(name, "Value4"))  skind = Value4Supp;
   else if (VG_STREQ(name, "Value8"))  skind = Value8Supp;
   else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
   else if (VG_STREQ(name, "Value32")) skind = Value32Supp;
   else if (VG_STREQ(name, "FishyValue")) skind = FishyValueSupp;
   else
      return False;

   VG_(set_supp_kind)(su, skind);
   return True;
}
typedef struct _MC_LeakSuppExtra MC_LeakSuppExtra;

struct _MC_LeakSuppExtra {
   UInt match_leak_kinds;

   /* Maintains nr of blocks and bytes suppressed with this suppression
      during the leak search identified by leak_search_gen.
      blocks_suppressed and bytes_suppressed are reset to 0 when
      used the first time during a leak search. */
   SizeT blocks_suppressed;
   SizeT bytes_suppressed;
   UInt  leak_search_gen;
};

typedef struct {
   const HChar *function_name;
   const HChar *argument_name;
} MC_FishyValueExtra;
Bool MC_(read_extra_suppression_info) ( Int fd, HChar** bufpp,
                                        SizeT* nBufp, Int* lineno, Supp *su )
{
   Bool eof;
   Int  i;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
   } else if (VG_(get_supp_kind)(su) == LeakSupp) {
      // We might have the optional match-leak-kinds line
      MC_LeakSuppExtra* lse;
      lse = VG_(malloc)("mc.resi.2", sizeof(MC_LeakSuppExtra));
      lse->match_leak_kinds = MC_(all_Reachedness)();
      lse->blocks_suppressed = 0;
      lse->bytes_suppressed = 0;
      lse->leak_search_gen = 0;
      VG_(set_supp_extra)(su, lse); // By default, all kinds will match.
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True; // old LeakSupp style, no match-leak-kinds line.
      if (0 == VG_(strncmp)(*bufpp, "match-leak-kinds:", 17)) {
         i = 17;
         while ((*bufpp)[i] && VG_(isspace)((*bufpp)[i]))
            i++;
         if (!VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
                                  True,/*allow_all*/
                                  (*bufpp)+i, &lse->match_leak_kinds)) {
            return False;
         }
      } else {
         return False; // unknown extra line.
      }
   } else if (VG_(get_supp_kind)(su) == FishyValueSupp) {
      MC_FishyValueExtra *extra;
      HChar *p, *function_name, *argument_name = NULL;

      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True;

      // The suppression string is: function_name(argument_name)
      function_name = VG_(strdup)("mv.resi.4", *bufpp);
      p = VG_(strchr)(function_name, '(');
      if (p != NULL) {
         *p++ = '\0';
         argument_name = p;
         p = VG_(strchr)(p, ')');
         if (p != NULL)
            *p = '\0';
      }
      if (p == NULL) {    // malformed suppression string
         VG_(free)(function_name);
         return False;
      }

      extra = VG_(malloc)("mc.resi.3", sizeof *extra);
      extra->function_name = function_name;
      extra->argument_name = argument_name;

      VG_(set_supp_extra)(su, extra);
   }
   return True;
}
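
/* Example suppression entries exercising the extra lines parsed above
   (sketch; the names and frames are placeholders):

      {
         ignore-definite-leak-from-libfoo
         Memcheck:Leak
         match-leak-kinds: definite
         fun:malloc
         obj:*libfoo*
      }

      {
         ignore-fishy-size-argument
         Memcheck:FishyValue
         malloc(size)
         fun:malloc
      }

   For Param suppressions the extra line is simply the syscall parameter
   name as reported in the error, e.g. "write(buf)". */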
Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   Int       su_szB;
   MC_Error* extra = VG_(get_error_extra)(err);
   ErrorKind ekind = VG_(get_error_kind)(err);

   switch (VG_(get_supp_kind)(su)) {
      case ParamSupp:
         return ((ekind == Err_RegParam || ekind == Err_MemParam)
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case UserSupp:
         return (ekind == Err_User);

      case CoreMemSupp:
         return (ekind == Err_CoreMem
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case Value1Supp: su_szB = 1; goto value_case;
      case Value2Supp: su_szB = 2; goto value_case;
      case Value4Supp: su_szB = 4; goto value_case;
      case Value8Supp: su_szB = 8; goto value_case;
      case Value16Supp:su_szB =16; goto value_case;
      case Value32Supp:su_szB =32; goto value_case;
      value_case:
         return (ekind == Err_Value && extra->Err.Value.szB == su_szB);

      case CondSupp:
         return (ekind == Err_Cond);

      case Addr1Supp: su_szB = 1; goto addr_case;
      case Addr2Supp: su_szB = 2; goto addr_case;
      case Addr4Supp: su_szB = 4; goto addr_case;
      case Addr8Supp: su_szB = 8; goto addr_case;
      case Addr16Supp:su_szB =16; goto addr_case;
      case Addr32Supp:su_szB =32; goto addr_case;
      addr_case:
         return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);

      case JumpSupp:
         return (ekind == Err_Jump);

      case FreeSupp:
         return (ekind == Err_Free || ekind == Err_FreeMismatch);

      case OverlapSupp:
         return (ekind == Err_Overlap);

      case LeakSupp:
         if (ekind == Err_Leak) {
            MC_LeakSuppExtra* lse = (MC_LeakSuppExtra*) VG_(get_supp_extra)(su);
            if (lse->leak_search_gen != MC_(leak_search_gen)) {
               // First time we see this suppression during this leak search.
               // => reset the counters to 0.
               lse->blocks_suppressed = 0;
               lse->bytes_suppressed = 0;
               lse->leak_search_gen = MC_(leak_search_gen);
            }
            return RiS(extra->Err.Leak.lr->key.state, lse->match_leak_kinds);
         } else
            return False;

      case MempoolSupp:
         return (ekind == Err_IllegalMempool);

      case FishyValueSupp: {
         MC_FishyValueExtra *supp_extra = VG_(get_supp_extra)(su);

         return (ekind == Err_FishyValue) &&
                VG_STREQ(extra->Err.FishyValue.function_name,
                         supp_extra->function_name) &&
                VG_STREQ(extra->Err.FishyValue.argument_name,
                         supp_extra->argument_name);
      }

      default:
         VG_(printf)("Error:\n"
                     "  unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(tool_panic)("unknown suppression type in "
                         "MC_(error_matches_suppression)");
   }
}
const HChar* MC_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   case Err_RegParam:       return "Param";
   case Err_MemParam:       return "Param";
   case Err_User:           return "User";
   case Err_FreeMismatch:   return "Free";
   case Err_IllegalMempool: return "Mempool";
   case Err_Free:           return "Free";
   case Err_Jump:           return "Jump";
   case Err_CoreMem:        return "CoreMem";
   case Err_Overlap:        return "Overlap";
   case Err_Leak:           return "Leak";
   case Err_Cond:           return "Cond";
   case Err_FishyValue:     return "FishyValue";
   case Err_Addr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Addr.szB ) {
      case 1:               return "Addr1";
      case 2:               return "Addr2";
      case 4:               return "Addr4";
      case 8:               return "Addr8";
      case 16:              return "Addr16";
      case 32:              return "Addr32";
      default: VG_(tool_panic)("unexpected size for Addr");
      }
   }
   case Err_Value: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Value.szB ) {
      case 1:               return "Value1";
      case 2:               return "Value2";
      case 4:               return "Value4";
      case 8:               return "Value8";
      case 16:              return "Value16";
      case 32:              return "Value32";
      default: VG_(tool_panic)("unexpected size for Value");
      }
   }
   default: VG_(tool_panic)("get_error_name: unexpected type");
   }
}
SizeT MC_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   ErrorKind ekind = VG_(get_error_kind)(err);

   tl_assert(nBuf >= 1);

   if (Err_RegParam == ekind || Err_MemParam == ekind) {
      const HChar* errstr = VG_(get_error_string)(err);
      return VG_(snprintf)(buf, nBuf, "%s", errstr);
   } else if (Err_Leak == ekind) {
      MC_Error* extra = VG_(get_error_extra)(err);
      return VG_(snprintf) (buf, nBuf, "match-leak-kinds: %s",
         pp_Reachedness_for_leak_kinds(extra->Err.Leak.lr->key.state));
   } else if (Err_FishyValue == ekind) {
      MC_Error* extra = VG_(get_error_extra)(err);
      return VG_(snprintf) (buf, nBuf, "%s(%s)",
                            extra->Err.FishyValue.function_name,
                            extra->Err.FishyValue.argument_name);
   } else {
      buf[0] = '\0';
      return 0;
   }
}
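
/* For reference: the strings written into 'buf' above form the tool-specific
   extra line of a generated suppression (the same line that
   MC_(read_extra_suppression_info) parses back in), e.g. "write(buf)" for a
   Param error, "match-leak-kinds: definite" for a leak, or
   "function_name(argument_name)" for a fishy-value error. */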
SizeT MC_(print_extra_suppression_use) ( const Supp *su,
                                         /*OUT*/HChar *buf, Int nBuf )
{
   tl_assert(nBuf >= 1);

   if (VG_(get_supp_kind)(su) == LeakSupp) {
      MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);

      if (lse->leak_search_gen == MC_(leak_search_gen)
          && lse->blocks_suppressed > 0) {
         return VG_(snprintf) (buf, nBuf,
                               "suppressed: %'lu bytes in %'lu blocks",
                               lse->bytes_suppressed,
                               lse->blocks_suppressed);
      }
   }

   buf[0] = '\0';
   return 0;
}
void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su )
{
   if (VG_(get_supp_kind)(su) == LeakSupp) {
      MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
      MC_Error* extra = VG_(get_error_extra)(err);

      tl_assert (lse->leak_search_gen == MC_(leak_search_gen));
      lse->blocks_suppressed += extra->Err.Leak.lr->num_blocks;
      lse->bytes_suppressed
         += extra->Err.Leak.lr->szB + extra->Err.Leak.lr->indirect_szB;
   }
}
/*--------------------------------------------------------------------*/
/*--- end                                              mc_errors.c ---*/
/*--------------------------------------------------------------------*/