2 /*--------------------------------------------------------------------*/
3 /*--- Management, printing, etc, of errors and suppressions. ---*/
4 /*--- mc_errors.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors.
11 Copyright (C) 2000-2017 Julian Seward
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, see <http://www.gnu.org/licenses/>.
27 The GNU General Public License is contained in the file COPYING.
30 #include "pub_tool_basics.h"
31 #include "pub_tool_gdbserver.h"
32 #include "pub_tool_poolalloc.h" // For mc_include.h
33 #include "pub_tool_hashtable.h" // For mc_include.h
34 #include "pub_tool_libcbase.h"
35 #include "pub_tool_libcassert.h"
36 #include "pub_tool_libcprint.h"
37 #include "pub_tool_machine.h"
38 #include "pub_tool_mallocfree.h"
39 #include "pub_tool_options.h"
40 #include "pub_tool_replacemalloc.h"
41 #include "pub_tool_tooliface.h"
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_debuginfo.h" // VG_(get_dataname_and_offset)
44 #include "pub_tool_xarray.h"
45 #include "pub_tool_aspacemgr.h"
46 #include "pub_tool_addrinfo.h"
48 #include "mc_include.h"
51 /*------------------------------------------------------------*/
52 /*--- Error types ---*/
53 /*------------------------------------------------------------*/
55 /* See comment in mc_include.h */
/* Latches True the first time an uninitialised-value (definedness)
   error is printed (set in MC_(pp_Error) for the UninitValue /
   UninitCondition / SyscallParam value-error cases); starts False. */
56 Bool
MC_(any_value_errors
) = False
;
59 /* ------------------ Errors ----------------------- */
61 /* What kind of error it is. */
83 typedef struct _MC_Error MC_Error
;
86 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
90 // Use of an undefined value:
91 // - as a pointer in a load or store
94 SizeT szB
; // size of value in bytes
96 UInt otag
; // origin tag
97 ExeContext
* origin_ec
; // filled in later
100 // Use of an undefined value in a conditional branch or move.
103 UInt otag
; // origin tag
104 ExeContext
* origin_ec
; // filled in later
107 // Addressability error in core (signal-handling) operation.
108 // It would be good to get rid of this error kind, merge it with
109 // another one somehow.
113 // Use of an unaddressable memory location in a load or store.
115 Bool isWrite
; // read or write?
116 SizeT szB
; // not used for exec (jump) errors
117 Bool maybe_gcc
; // True if just below %esp -- could be a gcc bug
121 // Jump to an unaddressable memory location.
126 // System call register input contains undefined bytes.
129 UInt otag
; // origin tag
130 ExeContext
* origin_ec
; // filled in later
133 // System call memory input contains undefined/unaddressable bytes
135 Bool isAddrErr
; // Addressability or definedness error?
138 UInt otag
; // origin tag
139 ExeContext
* origin_ec
; // filled in later
142 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
144 Bool isAddrErr
; // Addressability or definedness error?
147 UInt otag
; // origin tag
148 ExeContext
* origin_ec
; // filled in later
151 // Program tried to free() something that's not a heap block (this
152 // covers double-frees). */
157 // Program allocates heap block with one function
158 // (malloc/new/new[]/custom) and deallocates with not the matching one.
167 // Call to strcpy, memcpy, etc, with overlapping blocks.
169 Addr src
; // Source block
170 Addr dst
; // Destination block
171 SizeT szB
; // Size in bytes; 0 if unused.
177 UInt n_total_records
;
181 // A memory pool error.
186 // A fishy function argument value
187 // An argument value is considered fishy if the corresponding
188 // parameter has SizeT type and the value when interpreted as a
189 // signed number is negative.
191 const HChar
*function_name
;
192 const HChar
*argument_name
;
199 /*------------------------------------------------------------*/
200 /*--- Printing errors ---*/
201 /*------------------------------------------------------------*/
203 /* This is the "this error is due to be printed shortly; so have a
204 look at it and print any preamble you want" function. Which, in
205 Memcheck, we don't use. Hence a no-op.
207 void MC_(before_pp_Error
) ( const Error
* err
) {
210 /* Do a printf-style operation on either the XML or normal output
211 channel, depending on the setting of VG_(clo_xml).
213 static void emit_WRK ( const HChar
* format
, va_list vargs
)
216 VG_(vprintf_xml
)(format
, vargs
);
218 VG_(vmessage
)(Vg_UserMsg
, format
, vargs
);
221 static void emit ( const HChar
* format
, ... ) PRINTF_CHECK(1, 2);
222 static void emit ( const HChar
* format
, ... )
225 va_start(vargs
, format
);
226 emit_WRK(format
, vargs
);
231 static const HChar
* str_leak_lossmode ( Reachedness lossmode
)
233 const HChar
*loss
= "?";
235 case Unreached
: loss
= "definitely lost"; break;
236 case IndirectLeak
: loss
= "indirectly lost"; break;
237 case Possible
: loss
= "possibly lost"; break;
238 case Reachable
: loss
= "still reachable"; break;
243 static const HChar
* xml_leak_kind ( Reachedness lossmode
)
245 const HChar
*loss
= "?";
247 case Unreached
: loss
= "Leak_DefinitelyLost"; break;
248 case IndirectLeak
: loss
= "Leak_IndirectlyLost"; break;
249 case Possible
: loss
= "Leak_PossiblyLost"; break;
250 case Reachable
: loss
= "Leak_StillReachable"; break;
/* Comma-separated keyword list accepted when parsing a leak-kinds
   set; handed to VG_(parse_enum_set) (see MC_(all_Reachedness)).
   Token order matches the Reachedness values used elsewhere here. */
255 const HChar
* MC_(parse_leak_kinds_tokens
) =
256 "reachable,possible,indirect,definite";
258 UInt
MC_(all_Reachedness
)(void)
263 // Compute a set with all values by doing a parsing of the "all" keyword.
264 Bool parseok
= VG_(parse_enum_set
)(MC_(parse_leak_kinds_tokens
),
268 tl_assert (parseok
&& all
);
274 static const HChar
* pp_Reachedness_for_leak_kinds(Reachedness r
)
277 case Reachable
: return "reachable";
278 case Possible
: return "possible";
279 case IndirectLeak
: return "indirect";
280 case Unreached
: return "definite";
281 default: tl_assert(0);
285 static void mc_pp_origin ( ExeContext
* ec
, UInt okind
)
287 const HChar
* src
= NULL
;
291 case MC_OKIND_STACK
: src
= " by a stack allocation"; break;
292 case MC_OKIND_HEAP
: src
= " by a heap allocation"; break;
293 case MC_OKIND_USER
: src
= " by a client request"; break;
294 case MC_OKIND_UNKNOWN
: src
= ""; break;
296 tl_assert(src
); /* guards against invalid 'okind' */
299 emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
301 VG_(pp_ExeContext
)( ec
);
303 emit( " Uninitialised value was created%s\n", src
);
304 VG_(pp_ExeContext
)( ec
);
308 HChar
* MC_(snprintf_delta
) (HChar
* buf
, Int size
,
309 SizeT current_val
, SizeT old_val
,
310 LeakCheckDeltaMode delta_mode
)
312 // Make sure the buffer size is large enough. With old_val == 0 and
313 // current_val == ULLONG_MAX the delta including inserted commas is:
314 // 18,446,744,073,709,551,615
315 // whose length is 26. Therefore:
316 tl_assert(size
>= 26 + 4 + 1);
318 if (delta_mode
== LCD_Any
)
320 else if (current_val
>= old_val
)
321 VG_(snprintf
) (buf
, size
, " (+%'lu)", current_val
- old_val
);
323 VG_(snprintf
) (buf
, size
, " (-%'lu)", old_val
- current_val
);
328 static void pp_LossRecord(UInt n_this_record
, UInt n_total_records
,
329 LossRecord
* lr
, Bool xml
)
331 // char arrays to produce the indication of increase/decrease in case
332 // of delta_mode != LCD_Any
334 HChar d_direct_bytes
[31];
335 HChar d_indirect_bytes
[31];
336 HChar d_num_blocks
[31];
337 /* A loss record that had an old number of blocks 0 is a new loss record.
338 We mark it as new only when doing any kind of delta leak search. */
339 const HChar
*new_loss_record_marker
340 = MC_(detect_memory_leaks_last_delta_mode
) != LCD_Any
341 && lr
->old_num_blocks
== 0
344 MC_(snprintf_delta
) (d_bytes
, sizeof(d_bytes
),
345 lr
->szB
+ lr
->indirect_szB
,
346 lr
->old_szB
+ lr
->old_indirect_szB
,
347 MC_(detect_memory_leaks_last_delta_mode
));
348 MC_(snprintf_delta
) (d_direct_bytes
, sizeof(d_direct_bytes
),
351 MC_(detect_memory_leaks_last_delta_mode
));
352 MC_(snprintf_delta
) (d_indirect_bytes
, sizeof(d_indirect_bytes
),
354 lr
->old_indirect_szB
,
355 MC_(detect_memory_leaks_last_delta_mode
));
356 MC_(snprintf_delta
) (d_num_blocks
, sizeof(d_num_blocks
),
357 (SizeT
) lr
->num_blocks
,
358 (SizeT
) lr
->old_num_blocks
,
359 MC_(detect_memory_leaks_last_delta_mode
));
362 emit(" <kind>%s</kind>\n", xml_leak_kind(lr
->key
.state
));
363 if (lr
->indirect_szB
> 0) {
364 emit( " <xwhat>\n" );
365 emit( " <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
367 " are %s in %sloss record %'u of %'u</text>\n",
368 lr
->szB
+ lr
->indirect_szB
, d_bytes
,
369 lr
->szB
, d_direct_bytes
,
370 lr
->indirect_szB
, d_indirect_bytes
,
371 lr
->num_blocks
, d_num_blocks
,
372 str_leak_lossmode(lr
->key
.state
),
373 new_loss_record_marker
,
374 n_this_record
, n_total_records
);
375 // Nb: don't put commas in these XML numbers
376 emit( " <leakedbytes>%lu</leakedbytes>\n",
377 lr
->szB
+ lr
->indirect_szB
);
378 emit( " <leakedblocks>%u</leakedblocks>\n", lr
->num_blocks
);
379 emit( " </xwhat>\n" );
381 emit( " <xwhat>\n" );
382 emit( " <text>%'lu%s bytes in %'u%s blocks"
383 " are %s in %sloss record %'u of %'u</text>\n",
384 lr
->szB
, d_direct_bytes
,
385 lr
->num_blocks
, d_num_blocks
,
386 str_leak_lossmode(lr
->key
.state
),
387 new_loss_record_marker
,
388 n_this_record
, n_total_records
);
389 emit( " <leakedbytes>%lu</leakedbytes>\n", lr
->szB
);
390 emit( " <leakedblocks>%u</leakedblocks>\n", lr
->num_blocks
);
391 emit( " </xwhat>\n" );
393 VG_(pp_ExeContext
)(lr
->key
.allocated_at
);
394 } else { /* ! if (xml) */
395 if (lr
->indirect_szB
> 0) {
397 "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
398 " are %s in %sloss record %'u of %'u\n",
399 lr
->szB
+ lr
->indirect_szB
, d_bytes
,
400 lr
->szB
, d_direct_bytes
,
401 lr
->indirect_szB
, d_indirect_bytes
,
402 lr
->num_blocks
, d_num_blocks
,
403 str_leak_lossmode(lr
->key
.state
),
404 new_loss_record_marker
,
405 n_this_record
, n_total_records
409 "%'lu%s bytes in %'u%s blocks are %s in %sloss record %'u of %'u\n",
410 lr
->szB
, d_direct_bytes
,
411 lr
->num_blocks
, d_num_blocks
,
412 str_leak_lossmode(lr
->key
.state
),
413 new_loss_record_marker
,
414 n_this_record
, n_total_records
417 VG_(pp_ExeContext
)(lr
->key
.allocated_at
);
421 void MC_(pp_LossRecord
)(UInt n_this_record
, UInt n_total_records
,
424 pp_LossRecord (n_this_record
, n_total_records
, l
, /* xml */ False
);
427 void MC_(pp_Error
) ( const Error
* err
)
429 const Bool xml
= VG_(clo_xml
); /* a shorthand */
430 MC_Error
* extra
= VG_(get_error_extra
)(err
);
432 switch (VG_(get_error_kind
)(err
)) {
434 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
435 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
436 signal handler frame. --njn */
437 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
438 // the following code is untested. Bad.
440 emit( " <kind>CoreMemError</kind>\n" );
441 emit( " <what>%pS contains unaddressable byte(s)</what>\n",
442 VG_(get_error_string
)(err
));
443 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
445 emit( "%s contains unaddressable byte(s)\n",
446 VG_(get_error_string
)(err
));
447 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
452 MC_(any_value_errors
) = True
;
454 emit( " <kind>UninitValue</kind>\n" );
455 emit( " <what>Use of uninitialised value of size %lu</what>\n",
456 extra
->Err
.Value
.szB
);
457 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
458 if (extra
->Err
.Value
.origin_ec
)
459 mc_pp_origin( extra
->Err
.Value
.origin_ec
,
460 extra
->Err
.Value
.otag
& 3 );
462 /* Could also show extra->Err.Cond.otag if debugging origin
464 emit( "Use of uninitialised value of size %lu\n",
465 extra
->Err
.Value
.szB
);
466 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
467 if (extra
->Err
.Value
.origin_ec
)
468 mc_pp_origin( extra
->Err
.Value
.origin_ec
,
469 extra
->Err
.Value
.otag
& 3 );
474 MC_(any_value_errors
) = True
;
476 emit( " <kind>UninitCondition</kind>\n" );
477 emit( " <what>Conditional jump or move depends"
478 " on uninitialised value(s)</what>\n" );
479 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
480 if (extra
->Err
.Cond
.origin_ec
)
481 mc_pp_origin( extra
->Err
.Cond
.origin_ec
,
482 extra
->Err
.Cond
.otag
& 3 );
484 /* Could also show extra->Err.Cond.otag if debugging origin
486 emit( "Conditional jump or move depends"
487 " on uninitialised value(s)\n" );
488 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
489 if (extra
->Err
.Cond
.origin_ec
)
490 mc_pp_origin( extra
->Err
.Cond
.origin_ec
,
491 extra
->Err
.Cond
.otag
& 3 );
496 MC_(any_value_errors
) = True
;
498 emit( " <kind>SyscallParam</kind>\n" );
499 emit( " <what>Syscall param %pS contains "
500 "uninitialised byte(s)</what>\n",
501 VG_(get_error_string
)(err
) );
502 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
503 if (extra
->Err
.RegParam
.origin_ec
)
504 mc_pp_origin( extra
->Err
.RegParam
.origin_ec
,
505 extra
->Err
.RegParam
.otag
& 3 );
507 emit( "Syscall param %s contains uninitialised byte(s)\n",
508 VG_(get_error_string
)(err
) );
509 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
510 if (extra
->Err
.RegParam
.origin_ec
)
511 mc_pp_origin( extra
->Err
.RegParam
.origin_ec
,
512 extra
->Err
.RegParam
.otag
& 3 );
517 if (!extra
->Err
.MemParam
.isAddrErr
)
518 MC_(any_value_errors
) = True
;
520 emit( " <kind>SyscallParam</kind>\n" );
521 emit( " <what>Syscall param %pS points to %s byte(s)</what>\n",
522 VG_(get_error_string
)(err
),
523 extra
->Err
.MemParam
.isAddrErr
524 ? "unaddressable" : "uninitialised" );
525 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
526 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
),
527 &extra
->Err
.MemParam
.ai
, False
);
528 if (extra
->Err
.MemParam
.origin_ec
529 && !extra
->Err
.MemParam
.isAddrErr
)
530 mc_pp_origin( extra
->Err
.MemParam
.origin_ec
,
531 extra
->Err
.MemParam
.otag
& 3 );
533 emit( "Syscall param %s points to %s byte(s)\n",
534 VG_(get_error_string
)(err
),
535 extra
->Err
.MemParam
.isAddrErr
536 ? "unaddressable" : "uninitialised" );
537 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
538 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
),
539 &extra
->Err
.MemParam
.ai
, False
);
540 if (extra
->Err
.MemParam
.origin_ec
541 && !extra
->Err
.MemParam
.isAddrErr
)
542 mc_pp_origin( extra
->Err
.MemParam
.origin_ec
,
543 extra
->Err
.MemParam
.otag
& 3 );
548 if (!extra
->Err
.User
.isAddrErr
)
549 MC_(any_value_errors
) = True
;
551 emit( " <kind>ClientCheck</kind>\n" );
552 emit( " <what>%s byte(s) found "
553 "during client check request</what>\n",
554 extra
->Err
.User
.isAddrErr
555 ? "Unaddressable" : "Uninitialised" );
556 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
557 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
), &extra
->Err
.User
.ai
,
559 if (extra
->Err
.User
.origin_ec
&& !extra
->Err
.User
.isAddrErr
)
560 mc_pp_origin( extra
->Err
.User
.origin_ec
,
561 extra
->Err
.User
.otag
& 3 );
563 emit( "%s byte(s) found during client check request\n",
564 extra
->Err
.User
.isAddrErr
565 ? "Unaddressable" : "Uninitialised" );
566 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
567 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
), &extra
->Err
.User
.ai
,
569 if (extra
->Err
.User
.origin_ec
&& !extra
->Err
.User
.isAddrErr
)
570 mc_pp_origin( extra
->Err
.User
.origin_ec
,
571 extra
->Err
.User
.otag
& 3 );
577 emit( " <kind>InvalidFree</kind>\n" );
578 emit( " <what>Invalid free() / delete / delete[]"
579 " / realloc()</what>\n" );
580 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
581 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
),
582 &extra
->Err
.Free
.ai
, False
);
584 emit( "Invalid free() / delete / delete[] / realloc()\n" );
585 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
586 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
),
587 &extra
->Err
.Free
.ai
, False
);
591 case Err_FreeMismatch
:
593 emit( " <kind>MismatchedFree</kind>\n" );
594 emit( " <what>Mismatched free() / delete / delete []</what>\n" );
595 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
596 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
),
597 &extra
->Err
.FreeMismatch
.ai
, False
);
599 emit( "Mismatched free() / delete / delete []\n" );
600 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
601 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
),
602 &extra
->Err
.FreeMismatch
.ai
, False
);
608 emit( " <kind>Invalid%s</kind>\n",
609 extra
->Err
.Addr
.isWrite
? "Write" : "Read" );
610 emit( " <what>Invalid %s of size %lu</what>\n",
611 extra
->Err
.Addr
.isWrite
? "write" : "read",
612 extra
->Err
.Addr
.szB
);
613 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
614 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
),
616 extra
->Err
.Addr
.maybe_gcc
);
618 emit( "Invalid %s of size %lu\n",
619 extra
->Err
.Addr
.isWrite
? "write" : "read",
620 extra
->Err
.Addr
.szB
);
621 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
623 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
),
625 extra
->Err
.Addr
.maybe_gcc
);
631 emit( " <kind>InvalidJump</kind>\n" );
632 emit( " <what>Jump to the invalid address stated "
633 "on the next line</what>\n" );
634 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
635 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
), &extra
->Err
.Jump
.ai
,
638 emit( "Jump to the invalid address stated on the next line\n" );
639 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
640 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
), &extra
->Err
.Jump
.ai
,
647 emit( " <kind>Overlap</kind>\n" );
648 if (extra
->Err
.Overlap
.szB
== 0) {
649 emit( " <what>Source and destination overlap "
650 "in %pS(%#lx, %#lx)\n</what>\n",
651 VG_(get_error_string
)(err
),
652 extra
->Err
.Overlap
.dst
, extra
->Err
.Overlap
.src
);
654 emit( " <what>Source and destination overlap "
655 "in %pS(%#lx, %#lx, %lu)</what>\n",
656 VG_(get_error_string
)(err
),
657 extra
->Err
.Overlap
.dst
, extra
->Err
.Overlap
.src
,
658 extra
->Err
.Overlap
.szB
);
660 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
662 if (extra
->Err
.Overlap
.szB
== 0) {
663 emit( "Source and destination overlap in %s(%#lx, %#lx)\n",
664 VG_(get_error_string
)(err
),
665 extra
->Err
.Overlap
.dst
, extra
->Err
.Overlap
.src
);
667 emit( "Source and destination overlap in %s(%#lx, %#lx, %lu)\n",
668 VG_(get_error_string
)(err
),
669 extra
->Err
.Overlap
.dst
, extra
->Err
.Overlap
.src
,
670 extra
->Err
.Overlap
.szB
);
672 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
676 case Err_IllegalMempool
:
677 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
678 // the following code is untested. Bad.
680 emit( " <kind>InvalidMemPool</kind>\n" );
681 emit( " <what>Illegal memory pool address</what>\n" );
682 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
683 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
),
684 &extra
->Err
.IllegalMempool
.ai
, False
);
686 emit( "Illegal memory pool address\n" );
687 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
688 VG_(pp_addrinfo_mc
)( VG_(get_error_address
)(err
),
689 &extra
->Err
.IllegalMempool
.ai
, False
);
694 UInt n_this_record
= extra
->Err
.Leak
.n_this_record
;
695 UInt n_total_records
= extra
->Err
.Leak
.n_total_records
;
696 LossRecord
* lr
= extra
->Err
.Leak
.lr
;
697 pp_LossRecord (n_this_record
, n_total_records
, lr
, xml
);
703 emit( " <kind>FishyValue</kind>\n" );
705 emit( "Argument '%s' of function %s has a fishy "
706 "(possibly negative) value: %ld\n",
707 extra
->Err
.FishyValue
.argument_name
,
708 extra
->Err
.FishyValue
.function_name
,
709 (SSizeT
)extra
->Err
.FishyValue
.value
);
711 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
713 emit( "Argument '%s' of function %s has a fishy "
714 "(possibly negative) value: %ld\n",
715 extra
->Err
.FishyValue
.argument_name
,
716 extra
->Err
.FishyValue
.function_name
,
717 (SSizeT
)extra
->Err
.FishyValue
.value
);
718 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
722 case Err_ReallocSizeZero
:
724 emit( " <kind>ReallocSizeZero</kind>\n" );
725 emit( " <what>realloc() with size 0</what>\n" );
726 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
727 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
),
728 &extra
->Err
.ReallocSizeZero
.ai
, False
);
730 emit( "realloc() with size 0\n" );
731 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
732 VG_(pp_addrinfo_mc
)(VG_(get_error_address
)(err
),
733 &extra
->Err
.ReallocSizeZero
.ai
, False
);
738 VG_(printf
)("Error:\n unknown Memcheck error code %d\n",
739 VG_(get_error_kind
)(err
));
740 VG_(tool_panic
)("unknown error code in mc_pp_Error)");
744 /*------------------------------------------------------------*/
745 /*--- Recording errors ---*/
746 /*------------------------------------------------------------*/
748 /* These many bytes below %ESP are considered addressable if we're
749 doing the --workaround-gcc296-bugs hack. */
750 #define VG_GCC296_BUG_STACK_SLOP 1024
752 /* Is this address within some small distance below %ESP? Used only
753 for the --workaround-gcc296-bugs kludge. */
754 static Bool
is_just_below_ESP( Addr esp
, Addr aa
)
756 esp
-= VG_STACK_REDZONE_SZB
;
757 if (esp
> aa
&& (esp
- aa
) <= VG_GCC296_BUG_STACK_SLOP
)
763 /* --- Called from generated and non-generated code --- */
765 void MC_(record_address_error
) ( ThreadId tid
, Addr a
, Int szB
,
771 if (MC_(in_ignored_range
)(a
))
774 if (VG_(is_watched
)( (isWrite
? write_watchpoint
: read_watchpoint
), a
, szB
))
777 Addr current_sp
= VG_(get_SP
)(tid
);
778 just_below_esp
= is_just_below_ESP( current_sp
, a
);
780 /* If this is caused by an access immediately below %ESP, and the
781 user asks nicely, we just ignore it. */
782 if (MC_(clo_workaround_gcc296_bugs
) && just_below_esp
)
785 /* Also, if this is caused by an access in the range of offsets
786 below the stack pointer as described by
787 --ignore-range-below-sp, ignore it. */
788 if (MC_(in_ignored_range_below_sp
)( current_sp
, a
, szB
))
791 extra
.Err
.Addr
.isWrite
= isWrite
;
792 extra
.Err
.Addr
.szB
= szB
;
793 extra
.Err
.Addr
.maybe_gcc
= just_below_esp
;
794 extra
.Err
.Addr
.ai
.tag
= Addr_Undescribed
;
795 VG_(maybe_record_error
)( tid
, Err_Addr
, a
, /*s*/NULL
, &extra
);
798 void MC_(record_value_error
) ( ThreadId tid
, Int szB
, UInt otag
)
801 tl_assert( MC_(clo_mc_level
) >= 2 );
803 tl_assert( MC_(clo_mc_level
) == 3 );
804 extra
.Err
.Value
.szB
= szB
;
805 extra
.Err
.Value
.otag
= otag
;
806 extra
.Err
.Value
.origin_ec
= NULL
; /* Filled in later */
807 VG_(maybe_record_error
)( tid
, Err_Value
, /*addr*/0, /*s*/NULL
, &extra
);
810 void MC_(record_cond_error
) ( ThreadId tid
, UInt otag
)
813 tl_assert( MC_(clo_mc_level
) >= 2 );
815 tl_assert( MC_(clo_mc_level
) == 3 );
816 extra
.Err
.Cond
.otag
= otag
;
817 extra
.Err
.Cond
.origin_ec
= NULL
; /* Filled in later */
818 VG_(maybe_record_error
)( tid
, Err_Cond
, /*addr*/0, /*s*/NULL
, &extra
);
821 /* --- Called from non-generated code --- */
823 /* This is for memory errors in signal-related memory. */
824 void MC_(record_core_mem_error
) ( ThreadId tid
, const HChar
* msg
)
826 VG_(maybe_record_error
)( tid
, Err_CoreMem
, /*addr*/0, msg
, /*extra*/NULL
);
829 void MC_(record_regparam_error
) ( ThreadId tid
, const HChar
* msg
, UInt otag
)
832 tl_assert(VG_INVALID_THREADID
!= tid
);
834 tl_assert( MC_(clo_mc_level
) == 3 );
835 extra
.Err
.RegParam
.otag
= otag
;
836 extra
.Err
.RegParam
.origin_ec
= NULL
; /* Filled in later */
837 VG_(maybe_record_error
)( tid
, Err_RegParam
, /*addr*/0, msg
, &extra
);
840 void MC_(record_memparam_error
) ( ThreadId tid
, Addr a
,
841 Bool isAddrErr
, const HChar
* msg
, UInt otag
)
844 tl_assert(VG_INVALID_THREADID
!= tid
);
846 tl_assert( MC_(clo_mc_level
) >= 2 );
848 tl_assert( MC_(clo_mc_level
) == 3 );
849 tl_assert( !isAddrErr
);
851 extra
.Err
.MemParam
.isAddrErr
= isAddrErr
;
852 extra
.Err
.MemParam
.ai
.tag
= Addr_Undescribed
;
853 extra
.Err
.MemParam
.otag
= otag
;
854 extra
.Err
.MemParam
.origin_ec
= NULL
; /* Filled in later */
855 VG_(maybe_record_error
)( tid
, Err_MemParam
, a
, msg
, &extra
);
858 void MC_(record_jump_error
) ( ThreadId tid
, Addr a
)
861 tl_assert(VG_INVALID_THREADID
!= tid
);
862 extra
.Err
.Jump
.ai
.tag
= Addr_Undescribed
;
863 VG_(maybe_record_error
)( tid
, Err_Jump
, a
, /*s*/NULL
, &extra
);
866 void MC_(record_free_error
) ( ThreadId tid
, Addr a
)
869 tl_assert(VG_INVALID_THREADID
!= tid
);
870 extra
.Err
.Free
.ai
.tag
= Addr_Undescribed
;
871 VG_(maybe_record_error
)( tid
, Err_Free
, a
, /*s*/NULL
, &extra
);
874 void MC_(record_freemismatch_error
) ( ThreadId tid
, MC_Chunk
* mc
)
877 AddrInfo
* ai
= &extra
.Err
.FreeMismatch
.ai
;
878 tl_assert(VG_INVALID_THREADID
!= tid
);
879 ai
->tag
= Addr_Block
;
880 ai
->Addr
.Block
.block_kind
= Block_Mallocd
; // Nb: Not 'Block_Freed'
881 ai
->Addr
.Block
.block_desc
= "block";
882 ai
->Addr
.Block
.block_szB
= mc
->szB
;
883 ai
->Addr
.Block
.rwoffset
= 0;
884 ai
->Addr
.Block
.allocated_at
= MC_(allocated_at
) (mc
);
885 VG_(initThreadInfo
) (&ai
->Addr
.Block
.alloc_tinfo
);
886 ai
->Addr
.Block
.freed_at
= MC_(freed_at
) (mc
);
887 VG_(maybe_record_error
)( tid
, Err_FreeMismatch
, mc
->data
, /*s*/NULL
,
891 void MC_(record_realloc_size_zero
) ( ThreadId tid
, Addr a
)
894 tl_assert(VG_INVALID_THREADID
!= tid
);
895 extra
.Err
.ReallocSizeZero
.ai
.tag
= Addr_Undescribed
;
896 VG_(maybe_record_error
)( tid
, Err_ReallocSizeZero
, a
, /*s*/NULL
, &extra
);
900 void MC_(record_illegal_mempool_error
) ( ThreadId tid
, Addr a
)
903 tl_assert(VG_INVALID_THREADID
!= tid
);
904 extra
.Err
.IllegalMempool
.ai
.tag
= Addr_Undescribed
;
905 VG_(maybe_record_error
)( tid
, Err_IllegalMempool
, a
, /*s*/NULL
, &extra
);
908 void MC_(record_overlap_error
) ( ThreadId tid
, const HChar
* function
,
909 Addr src
, Addr dst
, SizeT szB
)
912 tl_assert(VG_INVALID_THREADID
!= tid
);
913 extra
.Err
.Overlap
.src
= src
;
914 extra
.Err
.Overlap
.dst
= dst
;
915 extra
.Err
.Overlap
.szB
= szB
;
916 VG_(maybe_record_error
)(
917 tid
, Err_Overlap
, /*addr*/0, /*s*/function
, &extra
);
920 Bool
MC_(record_leak_error
) ( ThreadId tid
, UInt n_this_record
,
921 UInt n_total_records
, LossRecord
* lr
,
922 Bool print_record
, Bool count_error
)
925 extra
.Err
.Leak
.n_this_record
= n_this_record
;
926 extra
.Err
.Leak
.n_total_records
= n_total_records
;
927 extra
.Err
.Leak
.lr
= lr
;
929 VG_(unique_error
) ( tid
, Err_Leak
, /*Addr*/0, /*s*/NULL
, &extra
,
930 lr
->key
.allocated_at
, print_record
,
931 /*allow_GDB_attach*/False
, count_error
);
934 Bool
MC_(record_fishy_value_error
) ( ThreadId tid
, const HChar
*function_name
,
935 const HChar
*argument_name
, SizeT value
)
939 tl_assert(VG_INVALID_THREADID
!= tid
);
941 if ((SSizeT
)value
>= 0) return False
; // not a fishy value
943 extra
.Err
.FishyValue
.function_name
= function_name
;
944 extra
.Err
.FishyValue
.argument_name
= argument_name
;
945 extra
.Err
.FishyValue
.value
= value
;
947 VG_(maybe_record_error
)(
948 tid
, Err_FishyValue
, /*addr*/0, /*s*/NULL
, &extra
);
953 void MC_(record_user_error
) ( ThreadId tid
, Addr a
,
954 Bool isAddrErr
, UInt otag
)
958 tl_assert(!isAddrErr
);
959 tl_assert( MC_(clo_mc_level
) == 3 );
962 tl_assert( MC_(clo_mc_level
) >= 2 );
964 tl_assert(VG_INVALID_THREADID
!= tid
);
965 extra
.Err
.User
.isAddrErr
= isAddrErr
;
966 extra
.Err
.User
.ai
.tag
= Addr_Undescribed
;
967 extra
.Err
.User
.otag
= otag
;
968 extra
.Err
.User
.origin_ec
= NULL
; /* Filled in later */
969 VG_(maybe_record_error
)( tid
, Err_User
, a
, /*s*/NULL
, &extra
);
972 Bool
MC_(is_mempool_block
)(MC_Chunk
* mc_search
)
976 if (!MC_(mempool_list
))
979 // A chunk can only come from a mempool if a custom allocator
980 // is used. No search required for other kinds.
981 if (mc_search
->allockind
== MC_AllocCustom
) {
982 VG_(HT_ResetIter
)( MC_(mempool_list
) );
983 while ( (mp
= VG_(HT_Next
)(MC_(mempool_list
))) ) {
985 VG_(HT_ResetIter
)(mp
->chunks
);
986 while ( (mc
= VG_(HT_Next
)(mp
->chunks
)) ) {
996 /*------------------------------------------------------------*/
997 /*--- Other error operations ---*/
998 /*------------------------------------------------------------*/
1000 /* Compare error contexts, to detect duplicates. Note that if they
1001 are otherwise the same, the faulting addrs and associated rwoffsets
1002 are allowed to be different. */
1003 Bool
MC_(eq_Error
) ( VgRes res
, const Error
* e1
, const Error
* e2
)
1005 MC_Error
* extra1
= VG_(get_error_extra
)(e1
);
1006 MC_Error
* extra2
= VG_(get_error_extra
)(e2
);
1008 /* Guaranteed by calling function */
1009 tl_assert(VG_(get_error_kind
)(e1
) == VG_(get_error_kind
)(e2
));
1011 switch (VG_(get_error_kind
)(e1
)) {
1013 const HChar
*e1s
, *e2s
;
1014 e1s
= VG_(get_error_string
)(e1
);
1015 e2s
= VG_(get_error_string
)(e2
);
1016 if (e1s
== e2s
) return True
;
1017 if (VG_STREQ(e1s
, e2s
)) return True
;
1022 return VG_STREQ(VG_(get_error_string
)(e1
), VG_(get_error_string
)(e2
));
1024 // Perhaps we should also check the addrinfo.akinds for equality.
1025 // That would result in more error reports, but only in cases where
1026 // a register contains uninitialised bytes and points to memory
1027 // containing uninitialised bytes. Currently, the 2nd of those to be
1028 // detected won't be reported. That is (nearly?) always the memory
1029 // error, which is good.
1031 if (!VG_STREQ(VG_(get_error_string
)(e1
),
1032 VG_(get_error_string
)(e2
))) return False
;
1035 return ( extra1
->Err
.User
.isAddrErr
== extra2
->Err
.User
.isAddrErr
1039 case Err_FreeMismatch
:
1041 case Err_IllegalMempool
:
1046 case Err_FishyValue
:
1047 return VG_STREQ(extra1
->Err
.FishyValue
.function_name
,
1048 extra2
->Err
.FishyValue
.function_name
) &&
1049 VG_STREQ(extra1
->Err
.FishyValue
.argument_name
,
1050 extra2
->Err
.FishyValue
.argument_name
);
1053 return ( extra1
->Err
.Addr
.szB
== extra2
->Err
.Addr
.szB
1057 return ( extra1
->Err
.Value
.szB
== extra2
->Err
.Value
.szB
1061 VG_(tool_panic
)("Shouldn't get Err_Leak in mc_eq_Error,\n"
1062 "since it's handled with VG_(unique_error)()!");
1065 VG_(printf
)("Error:\n unknown error code %d\n",
1066 VG_(get_error_kind
)(e1
));
1067 VG_(tool_panic
)("unknown error code in mc_eq_Error");
1071 /* Functions used when searching MC_Chunk lists */
1073 Bool
addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk
* mc
, Addr a
)
1075 return VG_(addr_is_in_block
)( a
, mc
->data
, mc
->szB
,
1076 MC_(Malloc_Redzone_SzB
) );
1079 Bool
addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk
* mc
, Addr a
, SizeT rzB
)
1081 return VG_(addr_is_in_block
)( a
, mc
->data
, mc
->szB
,
1085 // Forward declarations
1086 static Bool
client_block_maybe_describe( Addr a
, AddrInfo
* ai
);
1087 static Bool
mempool_block_maybe_describe( Addr a
, Bool is_metapool
,
1091 /* Describe an address as best you can, for error messages,
1092 putting the result in ai. */
1093 static void describe_addr ( DiEpoch ep
, Addr a
, /*OUT*/AddrInfo
* ai
)
1097 tl_assert(Addr_Undescribed
== ai
->tag
);
1099 /* -- Perhaps it's a user-named block? -- */
1100 if (client_block_maybe_describe( a
, ai
)) {
1104 /* -- Perhaps it's in mempool block (non-meta)? -- */
1105 if (mempool_block_maybe_describe( a
, /*is_metapool*/ False
, ai
)) {
1109 /* Blocks allocated by memcheck malloc functions are either
1110 on the recently freed list or on the malloc-ed list.
1111 Custom blocks can be on both : a recently freed block might
1112 have been just re-allocated.
1113 So, first search the malloc-ed block, as the most recent
1114 block is the probable cause of error.
1115 We however detect and report that this is a recently re-allocated
1117 /* -- Search for a currently malloc'd block which might bracket it. -- */
1118 VG_(HT_ResetIter
)(MC_(malloc_list
));
1119 while ( (mc
= VG_(HT_Next
)(MC_(malloc_list
))) ) {
1120 if (!MC_(is_mempool_block
)(mc
) &&
1121 addr_is_in_MC_Chunk_default_REDZONE_SZB(mc
, a
)) {
1122 ai
->tag
= Addr_Block
;
1123 ai
->Addr
.Block
.block_kind
= Block_Mallocd
;
1124 if (MC_(get_freed_block_bracketting
)( a
))
1125 ai
->Addr
.Block
.block_desc
= "recently re-allocated block";
1127 ai
->Addr
.Block
.block_desc
= "block";
1128 ai
->Addr
.Block
.block_szB
= mc
->szB
;
1129 ai
->Addr
.Block
.rwoffset
= (Word
)a
- (Word
)mc
->data
;
1130 ai
->Addr
.Block
.allocated_at
= MC_(allocated_at
)(mc
);
1131 VG_(initThreadInfo
) (&ai
->Addr
.Block
.alloc_tinfo
);
1132 ai
->Addr
.Block
.freed_at
= MC_(freed_at
)(mc
);
1136 /* -- Search for a recently freed block which might bracket it. -- */
1137 mc
= MC_(get_freed_block_bracketting
)( a
);
1139 ai
->tag
= Addr_Block
;
1140 ai
->Addr
.Block
.block_kind
= Block_Freed
;
1141 ai
->Addr
.Block
.block_desc
= "block";
1142 ai
->Addr
.Block
.block_szB
= mc
->szB
;
1143 ai
->Addr
.Block
.rwoffset
= (Word
)a
- (Word
)mc
->data
;
1144 ai
->Addr
.Block
.allocated_at
= MC_(allocated_at
)(mc
);
1145 VG_(initThreadInfo
) (&ai
->Addr
.Block
.alloc_tinfo
);
1146 ai
->Addr
.Block
.freed_at
= MC_(freed_at
)(mc
);
1150 /* -- Perhaps it's in a meta mempool block? -- */
1151 /* This test is done last, because metapool blocks overlap with blocks
1152 handed out to the application. That makes every heap address part of
1153 a metapool block, so the interesting cases are handled first.
1154 This final search is a last-ditch attempt. When found, it is probably
1155 an error in the custom allocator itself. */
1156 if (mempool_block_maybe_describe( a
, /*is_metapool*/ True
, ai
)) {
1160 /* No block found. Search a non-heap block description. */
1161 VG_(describe_addr
) (ep
, a
, ai
);
1164 void MC_(pp_describe_addr
) ( DiEpoch ep
, Addr a
)
1168 ai
.tag
= Addr_Undescribed
;
1169 describe_addr (ep
, a
, &ai
);
1170 VG_(pp_addrinfo_mc
) (a
, &ai
, /* maybe_gcc */ False
);
1171 VG_(clear_addrinfo
) (&ai
);
1174 /* Fill in *origin_ec as specified by otag, or NULL it out if otag
1175 does not refer to a known origin. */
1176 static void update_origin ( /*OUT*/ExeContext
** origin_ec
,
1179 UInt ecu
= otag
& ~3;
1181 if (VG_(is_plausible_ECU
)(ecu
)) {
1182 *origin_ec
= VG_(get_ExeContext_from_ECU
)( ecu
);
1186 /* Updates the copy with address info if necessary (but not for all errors). */
1187 UInt
MC_(update_Error_extra
)( const Error
* err
)
1189 MC_Error
* extra
= VG_(get_error_extra
)(err
);
1190 DiEpoch ep
= VG_(get_ExeContext_epoch
)(VG_(get_error_where
)(err
));
1192 switch (VG_(get_error_kind
)(err
)) {
1193 // These ones don't have addresses associated with them, and so don't
1194 // need any updating.
1199 case Err_FishyValue
:
1200 // For Err_Leaks the returned size does not matter -- they are always
1201 // shown with VG_(unique_error)() so they 'extra' not copied. But
1202 // we make it consistent with the others.
1204 return sizeof(MC_Error
);
1206 // For value errors, get the ExeContext corresponding to the
1207 // origin tag. Note that it is a kludge to assume that
1208 // a length-1 trace indicates a stack origin. FIXME.
1210 update_origin( &extra
->Err
.Value
.origin_ec
,
1211 extra
->Err
.Value
.otag
);
1212 return sizeof(MC_Error
);
1214 update_origin( &extra
->Err
.Cond
.origin_ec
,
1215 extra
->Err
.Cond
.otag
);
1216 return sizeof(MC_Error
);
1218 update_origin( &extra
->Err
.RegParam
.origin_ec
,
1219 extra
->Err
.RegParam
.otag
);
1220 return sizeof(MC_Error
);
1222 // These ones always involve a memory address.
1224 describe_addr ( ep
, VG_(get_error_address
)(err
),
1225 &extra
->Err
.Addr
.ai
);
1226 return sizeof(MC_Error
);
1228 describe_addr ( ep
, VG_(get_error_address
)(err
),
1229 &extra
->Err
.MemParam
.ai
);
1230 update_origin( &extra
->Err
.MemParam
.origin_ec
,
1231 extra
->Err
.MemParam
.otag
);
1232 return sizeof(MC_Error
);
1234 describe_addr ( ep
, VG_(get_error_address
)(err
),
1235 &extra
->Err
.Jump
.ai
);
1236 return sizeof(MC_Error
);
1238 describe_addr ( ep
, VG_(get_error_address
)(err
),
1239 &extra
->Err
.User
.ai
);
1240 update_origin( &extra
->Err
.User
.origin_ec
,
1241 extra
->Err
.User
.otag
);
1242 return sizeof(MC_Error
);
1244 describe_addr ( ep
, VG_(get_error_address
)(err
),
1245 &extra
->Err
.Free
.ai
);
1246 return sizeof(MC_Error
);
1247 case Err_IllegalMempool
:
1248 describe_addr ( ep
, VG_(get_error_address
)(err
),
1249 &extra
->Err
.IllegalMempool
.ai
);
1250 return sizeof(MC_Error
);
1252 // Err_FreeMismatches have already had their address described; this is
1253 // possible because we have the MC_Chunk on hand when the error is
1254 // detected. However, the address may be part of a user block, and if so
1255 // we override the pre-determined description with a user block one.
1256 case Err_FreeMismatch
: {
1257 tl_assert(extra
&& Block_Mallocd
==
1258 extra
->Err
.FreeMismatch
.ai
.Addr
.Block
.block_kind
);
1259 (void)client_block_maybe_describe( VG_(get_error_address
)(err
),
1260 &extra
->Err
.FreeMismatch
.ai
);
1261 return sizeof(MC_Error
);
1263 case Err_ReallocSizeZero
:
1264 describe_addr ( ep
, VG_(get_error_address
)(err
),
1265 &extra
->Err
.ReallocSizeZero
.ai
);
1266 return sizeof(MC_Error
);
1268 default: VG_(tool_panic
)("mc_update_extra: bad errkind");
1273 static Bool
client_block_maybe_describe( Addr a
,
1274 /*OUT*/AddrInfo
* ai
)
1277 CGenBlock
* cgbs
= NULL
;
1280 MC_(get_ClientBlock_array
)( &cgbs
, &cgb_used
);
1282 tl_assert(cgb_used
== 0);
1284 /* Perhaps it's a general block ? */
1285 for (i
= 0; i
< cgb_used
; i
++) {
1286 if (cgbs
[i
].start
== 0 && cgbs
[i
].size
== 0)
1288 // Use zero as the redzone for client blocks.
1289 if (VG_(addr_is_in_block
)(a
, cgbs
[i
].start
, cgbs
[i
].size
, 0)) {
1290 ai
->tag
= Addr_Block
;
1291 ai
->Addr
.Block
.block_kind
= Block_UserG
;
1292 ai
->Addr
.Block
.block_desc
= cgbs
[i
].desc
;
1293 ai
->Addr
.Block
.block_szB
= cgbs
[i
].size
;
1294 ai
->Addr
.Block
.rwoffset
= (Word
)(a
) - (Word
)(cgbs
[i
].start
);
1295 ai
->Addr
.Block
.allocated_at
= cgbs
[i
].where
;
1296 VG_(initThreadInfo
) (&ai
->Addr
.Block
.alloc_tinfo
);
1297 ai
->Addr
.Block
.freed_at
= VG_(null_ExeContext
)();;
1305 static Bool
mempool_block_maybe_describe( Addr a
, Bool is_metapool
,
1306 /*OUT*/AddrInfo
* ai
)
1309 tl_assert( MC_(mempool_list
) );
1311 VG_(HT_ResetIter
)( MC_(mempool_list
) );
1312 while ( (mp
= VG_(HT_Next
)(MC_(mempool_list
))) ) {
1313 if (mp
->chunks
!= NULL
&& mp
->metapool
== is_metapool
) {
1315 VG_(HT_ResetIter
)(mp
->chunks
);
1316 while ( (mc
= VG_(HT_Next
)(mp
->chunks
)) ) {
1317 if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc
, a
, mp
->rzB
)) {
1318 ai
->tag
= Addr_Block
;
1319 ai
->Addr
.Block
.block_kind
= Block_MempoolChunk
;
1320 ai
->Addr
.Block
.block_desc
= "block";
1321 ai
->Addr
.Block
.block_szB
= mc
->szB
;
1322 ai
->Addr
.Block
.rwoffset
= (Word
)a
- (Word
)mc
->data
;
1323 ai
->Addr
.Block
.allocated_at
= MC_(allocated_at
)(mc
);
1324 VG_(initThreadInfo
) (&ai
->Addr
.Block
.alloc_tinfo
);
1325 ai
->Addr
.Block
.freed_at
= MC_(freed_at
)(mc
);
/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

/* The kinds of suppression MemCheck understands, as named in
   suppression files (see MC_(is_recognised_suppression)). */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp, Value32Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp, Addr32Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
      FishyValueSupp,// Fishy value suppression.
      ReallocSizeZeroSupp, // realloc size 0 suppression
   }
   MC_SuppKind;
1364 Bool
MC_(is_recognised_suppression
) ( const HChar
* name
, Supp
* su
)
1368 if (VG_STREQ(name
, "Param")) skind
= ParamSupp
;
1369 else if (VG_STREQ(name
, "User")) skind
= UserSupp
;
1370 else if (VG_STREQ(name
, "CoreMem")) skind
= CoreMemSupp
;
1371 else if (VG_STREQ(name
, "Addr1")) skind
= Addr1Supp
;
1372 else if (VG_STREQ(name
, "Addr2")) skind
= Addr2Supp
;
1373 else if (VG_STREQ(name
, "Addr4")) skind
= Addr4Supp
;
1374 else if (VG_STREQ(name
, "Addr8")) skind
= Addr8Supp
;
1375 else if (VG_STREQ(name
, "Addr16")) skind
= Addr16Supp
;
1376 else if (VG_STREQ(name
, "Addr32")) skind
= Addr32Supp
;
1377 else if (VG_STREQ(name
, "Jump")) skind
= JumpSupp
;
1378 else if (VG_STREQ(name
, "Free")) skind
= FreeSupp
;
1379 else if (VG_STREQ(name
, "Leak")) skind
= LeakSupp
;
1380 else if (VG_STREQ(name
, "Overlap")) skind
= OverlapSupp
;
1381 else if (VG_STREQ(name
, "Mempool")) skind
= MempoolSupp
;
1382 else if (VG_STREQ(name
, "Cond")) skind
= CondSupp
;
1383 else if (VG_STREQ(name
, "Value0")) skind
= CondSupp
; /* backwards compat */
1384 else if (VG_STREQ(name
, "Value1")) skind
= Value1Supp
;
1385 else if (VG_STREQ(name
, "Value2")) skind
= Value2Supp
;
1386 else if (VG_STREQ(name
, "Value4")) skind
= Value4Supp
;
1387 else if (VG_STREQ(name
, "Value8")) skind
= Value8Supp
;
1388 else if (VG_STREQ(name
, "Value16")) skind
= Value16Supp
;
1389 else if (VG_STREQ(name
, "Value32")) skind
= Value32Supp
;
1390 else if (VG_STREQ(name
, "FishyValue")) skind
= FishyValueSupp
;
1391 else if (VG_STREQ(name
, "ReallocZero")) skind
= ReallocSizeZeroSupp
;
1395 VG_(set_supp_kind
)(su
, skind
);
1399 typedef struct _MC_LeakSuppExtra MC_LeakSuppExtra
;
1401 struct _MC_LeakSuppExtra
{
1402 UInt match_leak_kinds
;
1404 /* Maintains nr of blocks and bytes suppressed with this suppression
1405 during the leak search identified by leak_search_gen.
1406 blocks_suppressed and bytes_suppressed are reset to 0 when
1407 used the first time during a leak search. */
1408 SizeT blocks_suppressed
;
1409 SizeT bytes_suppressed
;
1410 UInt leak_search_gen
;
1414 const HChar
*function_name
;
1415 const HChar
*argument_name
;
1416 } MC_FishyValueExtra
;
1418 Bool
MC_(read_extra_suppression_info
) ( Int fd
, HChar
** bufpp
,
1419 SizeT
* nBufp
, Int
* lineno
, Supp
*su
)
1424 if (VG_(get_supp_kind
)(su
) == ParamSupp
) {
1425 eof
= VG_(get_line
) ( fd
, bufpp
, nBufp
, lineno
);
1426 if (eof
) return False
;
1427 VG_(set_supp_string
)(su
, VG_(strdup
)("mc.resi.1", *bufpp
));
1428 if (VG_(strcmp
) (*bufpp
, "preadv(vector[...])") == 0
1429 || VG_(strcmp
) (*bufpp
, "pwritev(vector[...])") == 0) {
1430 /* Report the incompatible change introduced in 3.15
1431 when reading a unsupported 3.14 or before entry.
1433 VG_(umsg
)("WARNING: %s is an obsolete suppression line "
1434 "not supported in valgrind 3.15 or later.\n"
1435 "You should replace [...] by a specific index"
1436 " such as [0] or [1] or [2] or similar\n\n", *bufpp
);
1438 } else if (VG_(get_supp_kind
)(su
) == LeakSupp
) {
1439 // We might have the optional match-leak-kinds line
1440 MC_LeakSuppExtra
* lse
;
1441 lse
= VG_(malloc
)("mc.resi.2", sizeof(MC_LeakSuppExtra
));
1442 lse
->match_leak_kinds
= MC_(all_Reachedness
)();
1443 lse
->blocks_suppressed
= 0;
1444 lse
->bytes_suppressed
= 0;
1445 lse
->leak_search_gen
= 0;
1446 VG_(set_supp_extra
)(su
, lse
); // By default, all kinds will match.
1447 eof
= VG_(get_line
) ( fd
, bufpp
, nBufp
, lineno
);
1448 if (eof
) return True
; // old LeakSupp style, no match-leak-kinds line.
1449 if (0 == VG_(strncmp
)(*bufpp
, "match-leak-kinds:", 17)) {
1451 while ((*bufpp
)[i
] && VG_(isspace
)((*bufpp
)[i
]))
1453 if (!VG_(parse_enum_set
)(MC_(parse_leak_kinds_tokens
),
1455 (*bufpp
)+i
, &lse
->match_leak_kinds
)) {
1459 return False
; // unknown extra line.
1461 } else if (VG_(get_supp_kind
)(su
) == FishyValueSupp
) {
1462 MC_FishyValueExtra
*extra
;
1463 HChar
*p
, *function_name
, *argument_name
= NULL
;
1465 eof
= VG_(get_line
) ( fd
, bufpp
, nBufp
, lineno
);
1466 if (eof
) return True
;
1468 // The suppression string is: function_name(argument_name)
1469 function_name
= VG_(strdup
)("mv.resi.4", *bufpp
);
1470 p
= VG_(strchr
)(function_name
, '(');
1474 p
= VG_(strchr
)(p
, ')');
1478 if (p
== NULL
) { // malformed suppression string
1479 VG_(free
)(function_name
);
1483 extra
= VG_(malloc
)("mc.resi.3", sizeof *extra
);
1484 extra
->function_name
= function_name
;
1485 extra
->argument_name
= argument_name
;
1487 VG_(set_supp_extra
)(su
, extra
);
1492 Bool
MC_(error_matches_suppression
) ( const Error
* err
, const Supp
* su
)
1495 MC_Error
* extra
= VG_(get_error_extra
)(err
);
1496 ErrorKind ekind
= VG_(get_error_kind
)(err
);
1498 switch (VG_(get_supp_kind
)(su
)) {
1500 return ((ekind
== Err_RegParam
|| ekind
== Err_MemParam
)
1501 && VG_STREQ(VG_(get_error_string
)(err
),
1502 VG_(get_supp_string
)(su
)));
1505 return (ekind
== Err_User
);
1508 return (ekind
== Err_CoreMem
1509 && VG_STREQ(VG_(get_error_string
)(err
),
1510 VG_(get_supp_string
)(su
)));
1512 case Value1Supp
: su_szB
= 1; goto value_case
;
1513 case Value2Supp
: su_szB
= 2; goto value_case
;
1514 case Value4Supp
: su_szB
= 4; goto value_case
;
1515 case Value8Supp
: su_szB
= 8; goto value_case
;
1516 case Value16Supp
:su_szB
=16; goto value_case
;
1517 case Value32Supp
:su_szB
=32; goto value_case
;
1519 return (ekind
== Err_Value
&& extra
->Err
.Value
.szB
== su_szB
);
1522 return (ekind
== Err_Cond
);
1524 case Addr1Supp
: su_szB
= 1; goto addr_case
;
1525 case Addr2Supp
: su_szB
= 2; goto addr_case
;
1526 case Addr4Supp
: su_szB
= 4; goto addr_case
;
1527 case Addr8Supp
: su_szB
= 8; goto addr_case
;
1528 case Addr16Supp
:su_szB
=16; goto addr_case
;
1529 case Addr32Supp
:su_szB
=32; goto addr_case
;
1531 return (ekind
== Err_Addr
&& extra
->Err
.Addr
.szB
== su_szB
);
1534 return (ekind
== Err_Jump
);
1537 return (ekind
== Err_Free
|| ekind
== Err_FreeMismatch
);
1540 return (ekind
== Err_Overlap
);
1543 if (ekind
== Err_Leak
) {
1544 MC_LeakSuppExtra
* lse
= (MC_LeakSuppExtra
*) VG_(get_supp_extra
)(su
);
1545 if (lse
->leak_search_gen
!= MC_(leak_search_gen
)) {
1546 // First time we see this suppression during this leak search.
1547 // => reset the counters to 0.
1548 lse
->blocks_suppressed
= 0;
1549 lse
->bytes_suppressed
= 0;
1550 lse
->leak_search_gen
= MC_(leak_search_gen
);
1552 return RiS(extra
->Err
.Leak
.lr
->key
.state
, lse
->match_leak_kinds
);
1557 return (ekind
== Err_IllegalMempool
);
1559 case FishyValueSupp
: {
1560 MC_FishyValueExtra
*supp_extra
= VG_(get_supp_extra
)(su
);
1562 return (ekind
== Err_FishyValue
) &&
1563 VG_STREQ(extra
->Err
.FishyValue
.function_name
,
1564 supp_extra
->function_name
) &&
1565 VG_STREQ(extra
->Err
.FishyValue
.argument_name
,
1566 supp_extra
->argument_name
);
1569 case ReallocSizeZeroSupp
: {
1571 return (ekind
== Err_ReallocSizeZero
);
1575 VG_(printf
)("Error:\n"
1576 " unknown suppression type %d\n",
1577 VG_(get_supp_kind
)(su
));
1578 VG_(tool_panic
)("unknown suppression type in "
1579 "MC_(error_matches_suppression)");
1583 const HChar
* MC_(get_error_name
) ( const Error
* err
)
1585 switch (VG_(get_error_kind
)(err
)) {
1586 case Err_RegParam
: return "Param";
1587 case Err_MemParam
: return "Param";
1588 case Err_User
: return "User";
1589 case Err_FreeMismatch
: return "Free";
1590 case Err_IllegalMempool
: return "Mempool";
1591 case Err_Free
: return "Free";
1592 case Err_Jump
: return "Jump";
1593 case Err_CoreMem
: return "CoreMem";
1594 case Err_Overlap
: return "Overlap";
1595 case Err_Leak
: return "Leak";
1596 case Err_Cond
: return "Cond";
1597 case Err_FishyValue
: return "FishyValue";
1598 case Err_ReallocSizeZero
: return "ReallocZero";
1600 MC_Error
* extra
= VG_(get_error_extra
)(err
);
1601 switch ( extra
->Err
.Addr
.szB
) {
1602 case 1: return "Addr1";
1603 case 2: return "Addr2";
1604 case 4: return "Addr4";
1605 case 8: return "Addr8";
1606 case 16: return "Addr16";
1607 case 32: return "Addr32";
1608 default: VG_(tool_panic
)("unexpected size for Addr");
1612 MC_Error
* extra
= VG_(get_error_extra
)(err
);
1613 switch ( extra
->Err
.Value
.szB
) {
1614 case 1: return "Value1";
1615 case 2: return "Value2";
1616 case 4: return "Value4";
1617 case 8: return "Value8";
1618 case 16: return "Value16";
1619 case 32: return "Value32";
1620 default: VG_(tool_panic
)("unexpected size for Value");
1623 default: VG_(tool_panic
)("get_error_name: unexpected type");
1627 SizeT
MC_(get_extra_suppression_info
) ( const Error
* err
,
1628 /*OUT*/HChar
* buf
, Int nBuf
)
1630 ErrorKind ekind
= VG_(get_error_kind
)(err
);
1632 tl_assert(nBuf
>= 1);
1634 if (Err_RegParam
== ekind
|| Err_MemParam
== ekind
) {
1635 const HChar
* errstr
= VG_(get_error_string
)(err
);
1637 return VG_(snprintf
)(buf
, nBuf
, "%s", errstr
);
1638 } else if (Err_Leak
== ekind
) {
1639 MC_Error
* extra
= VG_(get_error_extra
)(err
);
1640 return VG_(snprintf
) (buf
, nBuf
, "match-leak-kinds: %s",
1641 pp_Reachedness_for_leak_kinds(extra
->Err
.Leak
.lr
->key
.state
));
1642 } else if (Err_FishyValue
== ekind
) {
1643 MC_Error
* extra
= VG_(get_error_extra
)(err
);
1644 return VG_(snprintf
) (buf
, nBuf
, "%s(%s)",
1645 extra
->Err
.FishyValue
.function_name
,
1646 extra
->Err
.FishyValue
.argument_name
);
1653 SizeT
MC_(print_extra_suppression_use
) ( const Supp
*su
,
1654 /*OUT*/HChar
*buf
, Int nBuf
)
1656 tl_assert(nBuf
>= 1);
1658 if (VG_(get_supp_kind
)(su
) == LeakSupp
) {
1659 MC_LeakSuppExtra
*lse
= (MC_LeakSuppExtra
*) VG_(get_supp_extra
) (su
);
1661 if (lse
->leak_search_gen
== MC_(leak_search_gen
)
1662 && lse
->blocks_suppressed
> 0) {
1663 return VG_(snprintf
) (buf
, nBuf
,
1664 "suppressed: %'lu bytes in %'lu blocks",
1665 lse
->bytes_suppressed
,
1666 lse
->blocks_suppressed
);
1674 void MC_(update_extra_suppression_use
) ( const Error
* err
, const Supp
* su
)
1676 if (VG_(get_supp_kind
)(su
) == LeakSupp
) {
1677 MC_LeakSuppExtra
*lse
= (MC_LeakSuppExtra
*) VG_(get_supp_extra
) (su
);
1678 MC_Error
* extra
= VG_(get_error_extra
)(err
);
1680 tl_assert (lse
->leak_search_gen
== MC_(leak_search_gen
));
1681 lse
->blocks_suppressed
+= extra
->Err
.Leak
.lr
->num_blocks
;
1682 lse
->bytes_suppressed
1683 += extra
->Err
.Leak
.lr
->szB
+ extra
->Err
.Leak
.lr
->indirect_szB
;
1687 /*--------------------------------------------------------------------*/
1688 /*--- end mc_errors.c ---*/
1689 /*--------------------------------------------------------------------*/