/*--------------------------------------------------------------------*/
/*--- Management, printing, etc, of errors and suppressions.       ---*/
/*---                                                  mc_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_poolalloc.h"     // For mc_include.h
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)
#include "pub_tool_xarray.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "mc_include.h"
51 /*------------------------------------------------------------*/
52 /*--- Error types ---*/
53 /*------------------------------------------------------------*/
55 /* See comment in mc_include.h */
56 Bool MC_(any_value_errors) = False;
/* ------------------ Errors ----------------------- */

/* What kind of error it is. */
typedef 
   enum { 
      Err_Value,
      Err_Cond,
      Err_CoreMem,
      Err_Addr,
      Err_Jump,
      Err_RegParam,
      Err_MemParam,
      Err_User,
      Err_Free,
      Err_FreeMismatch,
      Err_Overlap,
      Err_Leak,
      Err_IllegalMempool,
      Err_FishyValue,
      Err_ReallocSizeZero,
   }
   MC_ErrorTag;
83 typedef struct _MC_Error MC_Error;
85 struct _MC_Error {
86 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
87 //MC_ErrorTag tag;
89 union {
90 // Use of an undefined value:
91 // - as a pointer in a load or store
92 // - as a jump target
93 struct {
94 SizeT szB; // size of value in bytes
95 // Origin info
96 UInt otag; // origin tag
97 ExeContext* origin_ec; // filled in later
98 } Value;
100 // Use of an undefined value in a conditional branch or move.
101 struct {
102 // Origin info
103 UInt otag; // origin tag
104 ExeContext* origin_ec; // filled in later
105 } Cond;
107 // Addressability error in core (signal-handling) operation.
108 // It would be good to get rid of this error kind, merge it with
109 // another one somehow.
110 struct {
111 } CoreMem;
113 // Use of an unaddressable memory location in a load or store.
114 struct {
115 Bool isWrite; // read or write?
116 SizeT szB; // not used for exec (jump) errors
117 Bool maybe_gcc; // True if just below %esp -- could be a gcc bug
118 AddrInfo ai;
119 } Addr;
121 // Jump to an unaddressable memory location.
122 struct {
123 AddrInfo ai;
124 } Jump;
126 // System call register input contains undefined bytes.
127 struct {
128 // Origin info
129 UInt otag; // origin tag
130 ExeContext* origin_ec; // filled in later
131 } RegParam;
133 // System call memory input contains undefined/unaddressable bytes
134 struct {
135 Bool isAddrErr; // Addressability or definedness error?
136 AddrInfo ai;
137 // Origin info
138 UInt otag; // origin tag
139 ExeContext* origin_ec; // filled in later
140 } MemParam;
142 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
143 struct {
144 Bool isAddrErr; // Addressability or definedness error?
145 AddrInfo ai;
146 // Origin info
147 UInt otag; // origin tag
148 ExeContext* origin_ec; // filled in later
149 } User;
151 // Program tried to free() something that's not a heap block (this
152 // covers double-frees). */
153 struct {
154 AddrInfo ai;
155 } Free;
157 // Program allocates heap block with one function
158 // (malloc/new/new[]/custom) and deallocates with not the matching one.
159 struct {
160 AddrInfo ai;
161 } FreeMismatch;
163 struct {
164 AddrInfo ai;
165 } ReallocSizeZero;
167 // Call to strcpy, memcpy, etc, with overlapping blocks.
168 struct {
169 Addr src; // Source block
170 Addr dst; // Destination block
171 SizeT szB; // Size in bytes; 0 if unused.
172 } Overlap;
174 // A memory leak.
175 struct {
176 UInt n_this_record;
177 UInt n_total_records;
178 LossRecord* lr;
179 } Leak;
181 // A memory pool error.
182 struct {
183 AddrInfo ai;
184 } IllegalMempool;
186 // A fishy function argument value
187 // An argument value is considered fishy if the corresponding
188 // parameter has SizeT type and the value when interpreted as a
189 // signed number is negative.
190 struct {
191 const HChar *function_name;
192 const HChar *argument_name;
193 SizeT value;
194 } FishyValue;
195 } Err;
199 /*------------------------------------------------------------*/
200 /*--- Printing errors ---*/
201 /*------------------------------------------------------------*/
203 /* This is the "this error is due to be printed shortly; so have a
204 look at it any print any preamble you want" function. Which, in
205 Memcheck, we don't use. Hence a no-op.
207 void MC_(before_pp_Error) ( const Error* err ) {
210 /* Do a printf-style operation on either the XML or normal output
211 channel, depending on the setting of VG_(clo_xml).
213 static void emit_WRK ( const HChar* format, va_list vargs )
215 if (VG_(clo_xml)) {
216 VG_(vprintf_xml)(format, vargs);
217 } else {
218 VG_(vmessage)(Vg_UserMsg, format, vargs);
221 static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
222 static void emit ( const HChar* format, ... )
224 va_list vargs;
225 va_start(vargs, format);
226 emit_WRK(format, vargs);
227 va_end(vargs);
231 static const HChar* str_leak_lossmode ( Reachedness lossmode )
233 const HChar *loss = "?";
234 switch (lossmode) {
235 case Unreached: loss = "definitely lost"; break;
236 case IndirectLeak: loss = "indirectly lost"; break;
237 case Possible: loss = "possibly lost"; break;
238 case Reachable: loss = "still reachable"; break;
240 return loss;
243 static const HChar* xml_leak_kind ( Reachedness lossmode )
245 const HChar *loss = "?";
246 switch (lossmode) {
247 case Unreached: loss = "Leak_DefinitelyLost"; break;
248 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
249 case Possible: loss = "Leak_PossiblyLost"; break;
250 case Reachable: loss = "Leak_StillReachable"; break;
252 return loss;
255 const HChar* MC_(parse_leak_kinds_tokens) =
256 "reachable,possible,indirect,definite";
258 UInt MC_(all_Reachedness)(void)
260 static UInt all;
262 if (all == 0) {
263 // Compute a set with all values by doing a parsing of the "all" keyword.
264 Bool parseok = VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
265 True,/*allow_all*/
266 "all",
267 &all);
268 tl_assert (parseok && all);
271 return all;
274 static const HChar* pp_Reachedness_for_leak_kinds(Reachedness r)
276 switch(r) {
277 case Reachable: return "reachable";
278 case Possible: return "possible";
279 case IndirectLeak: return "indirect";
280 case Unreached: return "definite";
281 default: tl_assert(0);
285 static void mc_pp_origin ( ExeContext* ec, UInt okind )
287 const HChar* src = NULL;
288 tl_assert(ec);
290 switch (okind) {
291 case MC_OKIND_STACK: src = " by a stack allocation"; break;
292 case MC_OKIND_HEAP: src = " by a heap allocation"; break;
293 case MC_OKIND_USER: src = " by a client request"; break;
294 case MC_OKIND_UNKNOWN: src = ""; break;
296 tl_assert(src); /* guards against invalid 'okind' */
298 if (VG_(clo_xml)) {
299 emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
300 src);
301 VG_(pp_ExeContext)( ec );
302 } else {
303 emit( " Uninitialised value was created%s\n", src);
304 VG_(pp_ExeContext)( ec );
308 HChar * MC_(snprintf_delta) (HChar * buf, Int size,
309 SizeT current_val, SizeT old_val,
310 LeakCheckDeltaMode delta_mode)
312 // Make sure the buffer size is large enough. With old_val == 0 and
313 // current_val == ULLONG_MAX the delta including inserted commas is:
314 // 18,446,744,073,709,551,615
315 // whose length is 26. Therefore:
316 tl_assert(size >= 26 + 4 + 1);
318 if (delta_mode == LCD_Any)
319 buf[0] = '\0';
320 else if (current_val >= old_val)
321 VG_(snprintf) (buf, size, " (+%'lu)", current_val - old_val);
322 else
323 VG_(snprintf) (buf, size, " (-%'lu)", old_val - current_val);
325 return buf;
328 static void pp_LossRecord(UInt n_this_record, UInt n_total_records,
329 LossRecord* lr, Bool xml)
331 // char arrays to produce the indication of increase/decrease in case
332 // of delta_mode != LCD_Any
333 HChar d_bytes[31];
334 HChar d_direct_bytes[31];
335 HChar d_indirect_bytes[31];
336 HChar d_num_blocks[31];
337 /* A loss record that had an old number of blocks 0 is a new loss record.
338 We mark it as new only when doing any kind of delta leak search. */
339 const HChar *new_loss_record_marker
340 = MC_(detect_memory_leaks_last_delta_mode) != LCD_Any
341 && lr->old_num_blocks == 0
342 ? "new " : "";
344 MC_(snprintf_delta) (d_bytes, sizeof(d_bytes),
345 lr->szB + lr->indirect_szB,
346 lr->old_szB + lr->old_indirect_szB,
347 MC_(detect_memory_leaks_last_delta_mode));
348 MC_(snprintf_delta) (d_direct_bytes, sizeof(d_direct_bytes),
349 lr->szB,
350 lr->old_szB,
351 MC_(detect_memory_leaks_last_delta_mode));
352 MC_(snprintf_delta) (d_indirect_bytes, sizeof(d_indirect_bytes),
353 lr->indirect_szB,
354 lr->old_indirect_szB,
355 MC_(detect_memory_leaks_last_delta_mode));
356 MC_(snprintf_delta) (d_num_blocks, sizeof(d_num_blocks),
357 (SizeT) lr->num_blocks,
358 (SizeT) lr->old_num_blocks,
359 MC_(detect_memory_leaks_last_delta_mode));
361 if (xml) {
362 emit(" <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
363 if (lr->indirect_szB > 0) {
364 emit( " <xwhat>\n" );
365 emit( " <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
366 "in %'u%s blocks"
367 " are %s in %sloss record %'u of %'u</text>\n",
368 lr->szB + lr->indirect_szB, d_bytes,
369 lr->szB, d_direct_bytes,
370 lr->indirect_szB, d_indirect_bytes,
371 lr->num_blocks, d_num_blocks,
372 str_leak_lossmode(lr->key.state),
373 new_loss_record_marker,
374 n_this_record, n_total_records );
375 // Nb: don't put commas in these XML numbers
376 emit( " <leakedbytes>%lu</leakedbytes>\n",
377 lr->szB + lr->indirect_szB );
378 emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
379 emit( " </xwhat>\n" );
380 } else {
381 emit( " <xwhat>\n" );
382 emit( " <text>%'lu%s bytes in %'u%s blocks"
383 " are %s in %sloss record %'u of %'u</text>\n",
384 lr->szB, d_direct_bytes,
385 lr->num_blocks, d_num_blocks,
386 str_leak_lossmode(lr->key.state),
387 new_loss_record_marker,
388 n_this_record, n_total_records );
389 emit( " <leakedbytes>%lu</leakedbytes>\n", lr->szB);
390 emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
391 emit( " </xwhat>\n" );
393 VG_(pp_ExeContext)(lr->key.allocated_at);
394 } else { /* ! if (xml) */
395 if (lr->indirect_szB > 0) {
396 emit(
397 "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
398 " are %s in %sloss record %'u of %'u\n",
399 lr->szB + lr->indirect_szB, d_bytes,
400 lr->szB, d_direct_bytes,
401 lr->indirect_szB, d_indirect_bytes,
402 lr->num_blocks, d_num_blocks,
403 str_leak_lossmode(lr->key.state),
404 new_loss_record_marker,
405 n_this_record, n_total_records
407 } else {
408 emit(
409 "%'lu%s bytes in %'u%s blocks are %s in %sloss record %'u of %'u\n",
410 lr->szB, d_direct_bytes,
411 lr->num_blocks, d_num_blocks,
412 str_leak_lossmode(lr->key.state),
413 new_loss_record_marker,
414 n_this_record, n_total_records
417 VG_(pp_ExeContext)(lr->key.allocated_at);
418 } /* if (xml) */
421 void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
422 LossRecord* l)
424 pp_LossRecord (n_this_record, n_total_records, l, /* xml */ False);
427 void MC_(pp_Error) ( const Error* err )
429 const Bool xml = VG_(clo_xml); /* a shorthand */
430 MC_Error* extra = VG_(get_error_extra)(err);
432 switch (VG_(get_error_kind)(err)) {
433 case Err_CoreMem:
434 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
435 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
436 signal handler frame. --njn */
437 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
438 // the following code is untested. Bad.
439 if (xml) {
440 emit( " <kind>CoreMemError</kind>\n" );
441 emit( " <what>%pS contains unaddressable byte(s)</what>\n",
442 VG_(get_error_string)(err));
443 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
444 } else {
445 emit( "%s contains unaddressable byte(s)\n",
446 VG_(get_error_string)(err));
447 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
449 break;
451 case Err_Value:
452 MC_(any_value_errors) = True;
453 if (xml) {
454 emit( " <kind>UninitValue</kind>\n" );
455 emit( " <what>Use of uninitialised value of size %lu</what>\n",
456 extra->Err.Value.szB );
457 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
458 if (extra->Err.Value.origin_ec)
459 mc_pp_origin( extra->Err.Value.origin_ec,
460 extra->Err.Value.otag & 3 );
461 } else {
462 /* Could also show extra->Err.Cond.otag if debugging origin
463 tracking */
464 emit( "Use of uninitialised value of size %lu\n",
465 extra->Err.Value.szB );
466 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
467 if (extra->Err.Value.origin_ec)
468 mc_pp_origin( extra->Err.Value.origin_ec,
469 extra->Err.Value.otag & 3 );
471 break;
473 case Err_Cond:
474 MC_(any_value_errors) = True;
475 if (xml) {
476 emit( " <kind>UninitCondition</kind>\n" );
477 emit( " <what>Conditional jump or move depends"
478 " on uninitialised value(s)</what>\n" );
479 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
480 if (extra->Err.Cond.origin_ec)
481 mc_pp_origin( extra->Err.Cond.origin_ec,
482 extra->Err.Cond.otag & 3 );
483 } else {
484 /* Could also show extra->Err.Cond.otag if debugging origin
485 tracking */
486 emit( "Conditional jump or move depends"
487 " on uninitialised value(s)\n" );
488 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
489 if (extra->Err.Cond.origin_ec)
490 mc_pp_origin( extra->Err.Cond.origin_ec,
491 extra->Err.Cond.otag & 3 );
493 break;
495 case Err_RegParam:
496 MC_(any_value_errors) = True;
497 if (xml) {
498 emit( " <kind>SyscallParam</kind>\n" );
499 emit( " <what>Syscall param %pS contains "
500 "uninitialised byte(s)</what>\n",
501 VG_(get_error_string)(err) );
502 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
503 if (extra->Err.RegParam.origin_ec)
504 mc_pp_origin( extra->Err.RegParam.origin_ec,
505 extra->Err.RegParam.otag & 3 );
506 } else {
507 emit( "Syscall param %s contains uninitialised byte(s)\n",
508 VG_(get_error_string)(err) );
509 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
510 if (extra->Err.RegParam.origin_ec)
511 mc_pp_origin( extra->Err.RegParam.origin_ec,
512 extra->Err.RegParam.otag & 3 );
514 break;
516 case Err_MemParam:
517 if (!extra->Err.MemParam.isAddrErr)
518 MC_(any_value_errors) = True;
519 if (xml) {
520 emit( " <kind>SyscallParam</kind>\n" );
521 emit( " <what>Syscall param %pS points to %s byte(s)</what>\n",
522 VG_(get_error_string)(err),
523 extra->Err.MemParam.isAddrErr
524 ? "unaddressable" : "uninitialised" );
525 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
526 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
527 &extra->Err.MemParam.ai, False);
528 if (extra->Err.MemParam.origin_ec
529 && !extra->Err.MemParam.isAddrErr)
530 mc_pp_origin( extra->Err.MemParam.origin_ec,
531 extra->Err.MemParam.otag & 3 );
532 } else {
533 emit( "Syscall param %s points to %s byte(s)\n",
534 VG_(get_error_string)(err),
535 extra->Err.MemParam.isAddrErr
536 ? "unaddressable" : "uninitialised" );
537 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
538 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
539 &extra->Err.MemParam.ai, False);
540 if (extra->Err.MemParam.origin_ec
541 && !extra->Err.MemParam.isAddrErr)
542 mc_pp_origin( extra->Err.MemParam.origin_ec,
543 extra->Err.MemParam.otag & 3 );
545 break;
547 case Err_User:
548 if (!extra->Err.User.isAddrErr)
549 MC_(any_value_errors) = True;
550 if (xml) {
551 emit( " <kind>ClientCheck</kind>\n" );
552 emit( " <what>%s byte(s) found "
553 "during client check request</what>\n",
554 extra->Err.User.isAddrErr
555 ? "Unaddressable" : "Uninitialised" );
556 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
557 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
558 False);
559 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
560 mc_pp_origin( extra->Err.User.origin_ec,
561 extra->Err.User.otag & 3 );
562 } else {
563 emit( "%s byte(s) found during client check request\n",
564 extra->Err.User.isAddrErr
565 ? "Unaddressable" : "Uninitialised" );
566 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
567 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
568 False);
569 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
570 mc_pp_origin( extra->Err.User.origin_ec,
571 extra->Err.User.otag & 3 );
573 break;
575 case Err_Free:
576 if (xml) {
577 emit( " <kind>InvalidFree</kind>\n" );
578 emit( " <what>Invalid free() / delete / delete[]"
579 " / realloc()</what>\n" );
580 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
581 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
582 &extra->Err.Free.ai, False );
583 } else {
584 emit( "Invalid free() / delete / delete[] / realloc()\n" );
585 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
586 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
587 &extra->Err.Free.ai, False );
589 break;
591 case Err_FreeMismatch:
592 if (xml) {
593 emit( " <kind>MismatchedFree</kind>\n" );
594 emit( " <what>Mismatched free() / delete / delete []</what>\n" );
595 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
596 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
597 &extra->Err.FreeMismatch.ai, False);
598 } else {
599 emit( "Mismatched free() / delete / delete []\n" );
600 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
601 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
602 &extra->Err.FreeMismatch.ai, False);
604 break;
606 case Err_Addr:
607 if (xml) {
608 emit( " <kind>Invalid%s</kind>\n",
609 extra->Err.Addr.isWrite ? "Write" : "Read" );
610 emit( " <what>Invalid %s of size %lu</what>\n",
611 extra->Err.Addr.isWrite ? "write" : "read",
612 extra->Err.Addr.szB );
613 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
614 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
615 &extra->Err.Addr.ai,
616 extra->Err.Addr.maybe_gcc );
617 } else {
618 emit( "Invalid %s of size %lu\n",
619 extra->Err.Addr.isWrite ? "write" : "read",
620 extra->Err.Addr.szB );
621 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
623 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
624 &extra->Err.Addr.ai,
625 extra->Err.Addr.maybe_gcc );
627 break;
629 case Err_Jump:
630 if (xml) {
631 emit( " <kind>InvalidJump</kind>\n" );
632 emit( " <what>Jump to the invalid address stated "
633 "on the next line</what>\n" );
634 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
635 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
636 False );
637 } else {
638 emit( "Jump to the invalid address stated on the next line\n" );
639 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
640 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
641 False );
643 break;
645 case Err_Overlap:
646 if (xml) {
647 emit( " <kind>Overlap</kind>\n" );
648 if (extra->Err.Overlap.szB == 0) {
649 emit( " <what>Source and destination overlap "
650 "in %pS(%#lx, %#lx)\n</what>\n",
651 VG_(get_error_string)(err),
652 extra->Err.Overlap.dst, extra->Err.Overlap.src );
653 } else {
654 emit( " <what>Source and destination overlap "
655 "in %pS(%#lx, %#lx, %lu)</what>\n",
656 VG_(get_error_string)(err),
657 extra->Err.Overlap.dst, extra->Err.Overlap.src,
658 extra->Err.Overlap.szB );
660 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
661 } else {
662 if (extra->Err.Overlap.szB == 0) {
663 emit( "Source and destination overlap in %s(%#lx, %#lx)\n",
664 VG_(get_error_string)(err),
665 extra->Err.Overlap.dst, extra->Err.Overlap.src );
666 } else {
667 emit( "Source and destination overlap in %s(%#lx, %#lx, %lu)\n",
668 VG_(get_error_string)(err),
669 extra->Err.Overlap.dst, extra->Err.Overlap.src,
670 extra->Err.Overlap.szB );
672 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
674 break;
676 case Err_IllegalMempool:
677 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
678 // the following code is untested. Bad.
679 if (xml) {
680 emit( " <kind>InvalidMemPool</kind>\n" );
681 emit( " <what>Illegal memory pool address</what>\n" );
682 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
683 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
684 &extra->Err.IllegalMempool.ai, False );
685 } else {
686 emit( "Illegal memory pool address\n" );
687 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
688 VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
689 &extra->Err.IllegalMempool.ai, False );
691 break;
693 case Err_Leak: {
694 UInt n_this_record = extra->Err.Leak.n_this_record;
695 UInt n_total_records = extra->Err.Leak.n_total_records;
696 LossRecord* lr = extra->Err.Leak.lr;
697 pp_LossRecord (n_this_record, n_total_records, lr, xml);
698 break;
701 case Err_FishyValue:
702 if (xml) {
703 emit( " <kind>FishyValue</kind>\n" );
704 emit( " <what>");
705 emit( "Argument '%s' of function %s has a fishy "
706 "(possibly negative) value: %ld\n",
707 extra->Err.FishyValue.argument_name,
708 extra->Err.FishyValue.function_name,
709 (SSizeT)extra->Err.FishyValue.value);
710 emit( "</what>");
711 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
712 } else {
713 emit( "Argument '%s' of function %s has a fishy "
714 "(possibly negative) value: %ld\n",
715 extra->Err.FishyValue.argument_name,
716 extra->Err.FishyValue.function_name,
717 (SSizeT)extra->Err.FishyValue.value);
718 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
720 break;
722 case Err_ReallocSizeZero:
723 if (xml) {
724 emit( " <kind>ReallocSizeZero</kind>\n" );
725 emit( " <what>realloc() with size 0</what>\n" );
726 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
727 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
728 &extra->Err.ReallocSizeZero.ai, False);
729 } else {
730 emit( "realloc() with size 0\n" );
731 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
732 VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
733 &extra->Err.ReallocSizeZero.ai, False);
735 break;
737 default:
738 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
739 VG_(get_error_kind)(err));
740 VG_(tool_panic)("unknown error code in mc_pp_Error)");
/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* These many bytes below %ESP are considered addressible if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024
752 /* Is this address within some small distance below %ESP? Used only
753 for the --workaround-gcc296-bugs kludge. */
754 static Bool is_just_below_ESP( Addr esp, Addr aa )
756 esp -= VG_STACK_REDZONE_SZB;
757 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
758 return True;
759 else
760 return False;
763 /* --- Called from generated and non-generated code --- */
765 void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
766 Bool isWrite )
768 MC_Error extra;
769 Bool just_below_esp;
771 if (MC_(in_ignored_range)(a))
772 return;
774 if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
775 return;
777 Addr current_sp = VG_(get_SP)(tid);
778 just_below_esp = is_just_below_ESP( current_sp, a );
780 /* If this is caused by an access immediately below %ESP, and the
781 user asks nicely, we just ignore it. */
782 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
783 return;
785 /* Also, if this is caused by an access in the range of offsets
786 below the stack pointer as described by
787 --ignore-range-below-sp, ignore it. */
788 if (MC_(in_ignored_range_below_sp)( current_sp, a, szB ))
789 return;
791 extra.Err.Addr.isWrite = isWrite;
792 extra.Err.Addr.szB = szB;
793 extra.Err.Addr.maybe_gcc = just_below_esp;
794 extra.Err.Addr.ai.tag = Addr_Undescribed;
795 VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
798 void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
800 MC_Error extra;
801 tl_assert( MC_(clo_mc_level) >= 2 );
802 if (otag > 0)
803 tl_assert( MC_(clo_mc_level) == 3 );
804 extra.Err.Value.szB = szB;
805 extra.Err.Value.otag = otag;
806 extra.Err.Value.origin_ec = NULL; /* Filled in later */
807 VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
810 void MC_(record_cond_error) ( ThreadId tid, UInt otag )
812 MC_Error extra;
813 tl_assert( MC_(clo_mc_level) >= 2 );
814 if (otag > 0)
815 tl_assert( MC_(clo_mc_level) == 3 );
816 extra.Err.Cond.otag = otag;
817 extra.Err.Cond.origin_ec = NULL; /* Filled in later */
818 VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
821 /* --- Called from non-generated code --- */
823 /* This is for memory errors in signal-related memory. */
824 void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg )
826 VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
829 void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag )
831 MC_Error extra;
832 tl_assert(VG_INVALID_THREADID != tid);
833 if (otag > 0)
834 tl_assert( MC_(clo_mc_level) == 3 );
835 extra.Err.RegParam.otag = otag;
836 extra.Err.RegParam.origin_ec = NULL; /* Filled in later */
837 VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
840 void MC_(record_memparam_error) ( ThreadId tid, Addr a,
841 Bool isAddrErr, const HChar* msg, UInt otag )
843 MC_Error extra;
844 tl_assert(VG_INVALID_THREADID != tid);
845 if (!isAddrErr)
846 tl_assert( MC_(clo_mc_level) >= 2 );
847 if (otag != 0) {
848 tl_assert( MC_(clo_mc_level) == 3 );
849 tl_assert( !isAddrErr );
851 extra.Err.MemParam.isAddrErr = isAddrErr;
852 extra.Err.MemParam.ai.tag = Addr_Undescribed;
853 extra.Err.MemParam.otag = otag;
854 extra.Err.MemParam.origin_ec = NULL; /* Filled in later */
855 VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
858 void MC_(record_jump_error) ( ThreadId tid, Addr a )
860 MC_Error extra;
861 tl_assert(VG_INVALID_THREADID != tid);
862 extra.Err.Jump.ai.tag = Addr_Undescribed;
863 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
866 void MC_(record_free_error) ( ThreadId tid, Addr a )
868 MC_Error extra;
869 tl_assert(VG_INVALID_THREADID != tid);
870 extra.Err.Free.ai.tag = Addr_Undescribed;
871 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
874 void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
876 MC_Error extra;
877 AddrInfo* ai = &extra.Err.FreeMismatch.ai;
878 tl_assert(VG_INVALID_THREADID != tid);
879 ai->tag = Addr_Block;
880 ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed'
881 ai->Addr.Block.block_desc = "block";
882 ai->Addr.Block.block_szB = mc->szB;
883 ai->Addr.Block.rwoffset = 0;
884 ai->Addr.Block.allocated_at = MC_(allocated_at) (mc);
885 VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
886 ai->Addr.Block.freed_at = MC_(freed_at) (mc);
887 VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
888 &extra );
891 void MC_(record_realloc_size_zero) ( ThreadId tid, Addr a )
893 MC_Error extra;
894 tl_assert(VG_INVALID_THREADID != tid);
895 extra.Err.ReallocSizeZero.ai.tag = Addr_Undescribed;
896 VG_(maybe_record_error)( tid, Err_ReallocSizeZero, a, /*s*/NULL, &extra );
900 void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
902 MC_Error extra;
903 tl_assert(VG_INVALID_THREADID != tid);
904 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
905 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
908 void MC_(record_overlap_error) ( ThreadId tid, const HChar* function,
909 Addr src, Addr dst, SizeT szB )
911 MC_Error extra;
912 tl_assert(VG_INVALID_THREADID != tid);
913 extra.Err.Overlap.src = src;
914 extra.Err.Overlap.dst = dst;
915 extra.Err.Overlap.szB = szB;
916 VG_(maybe_record_error)(
917 tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
920 Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
921 UInt n_total_records, LossRecord* lr,
922 Bool print_record, Bool count_error )
924 MC_Error extra;
925 extra.Err.Leak.n_this_record = n_this_record;
926 extra.Err.Leak.n_total_records = n_total_records;
927 extra.Err.Leak.lr = lr;
928 return
929 VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
930 lr->key.allocated_at, print_record,
931 /*allow_GDB_attach*/False, count_error );
934 Bool MC_(record_fishy_value_error) ( ThreadId tid, const HChar *function_name,
935 const HChar *argument_name, SizeT value)
937 MC_Error extra;
939 tl_assert(VG_INVALID_THREADID != tid);
941 if ((SSizeT)value >= 0) return False; // not a fishy value
943 extra.Err.FishyValue.function_name = function_name;
944 extra.Err.FishyValue.argument_name = argument_name;
945 extra.Err.FishyValue.value = value;
947 VG_(maybe_record_error)(
948 tid, Err_FishyValue, /*addr*/0, /*s*/NULL, &extra );
950 return True;
953 void MC_(record_user_error) ( ThreadId tid, Addr a,
954 Bool isAddrErr, UInt otag )
956 MC_Error extra;
957 if (otag != 0) {
958 tl_assert(!isAddrErr);
959 tl_assert( MC_(clo_mc_level) == 3 );
961 if (!isAddrErr) {
962 tl_assert( MC_(clo_mc_level) >= 2 );
964 tl_assert(VG_INVALID_THREADID != tid);
965 extra.Err.User.isAddrErr = isAddrErr;
966 extra.Err.User.ai.tag = Addr_Undescribed;
967 extra.Err.User.otag = otag;
968 extra.Err.User.origin_ec = NULL; /* Filled in later */
969 VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
972 Bool MC_(is_mempool_block)(MC_Chunk* mc_search)
974 MC_Mempool* mp;
976 if (!MC_(mempool_list))
977 return False;
979 // A chunk can only come from a mempool if a custom allocator
980 // is used. No search required for other kinds.
981 if (mc_search->allockind == MC_AllocCustom) {
982 VG_(HT_ResetIter)( MC_(mempool_list) );
983 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
984 MC_Chunk* mc;
985 VG_(HT_ResetIter)(mp->chunks);
986 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
987 if (mc == mc_search)
988 return True;
993 return False;
996 /*------------------------------------------------------------*/
997 /*--- Other error operations ---*/
998 /*------------------------------------------------------------*/
1000 /* Compare error contexts, to detect duplicates. Note that if they
1001 are otherwise the same, the faulting addrs and associated rwoffsets
1002 are allowed to be different. */
1003 Bool MC_(eq_Error) ( VgRes res, const Error* e1, const Error* e2 )
1005 MC_Error* extra1 = VG_(get_error_extra)(e1);
1006 MC_Error* extra2 = VG_(get_error_extra)(e2);
1008 /* Guaranteed by calling function */
1009 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
1011 switch (VG_(get_error_kind)(e1)) {
1012 case Err_CoreMem: {
1013 const HChar *e1s, *e2s;
1014 e1s = VG_(get_error_string)(e1);
1015 e2s = VG_(get_error_string)(e2);
1016 if (e1s == e2s) return True;
1017 if (VG_STREQ(e1s, e2s)) return True;
1018 return False;
1021 case Err_RegParam:
1022 return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
1024 // Perhaps we should also check the addrinfo.akinds for equality.
1025 // That would result in more error reports, but only in cases where
1026 // a register contains uninitialised bytes and points to memory
1027 // containing uninitialised bytes. Currently, the 2nd of those to be
1028 // detected won't be reported. That is (nearly?) always the memory
1029 // error, which is good.
1030 case Err_MemParam:
1031 if (!VG_STREQ(VG_(get_error_string)(e1),
1032 VG_(get_error_string)(e2))) return False;
1033 // fall through
1034 case Err_User:
1035 return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
1036 ? True : False );
1038 case Err_Free:
1039 case Err_FreeMismatch:
1040 case Err_Jump:
1041 case Err_IllegalMempool:
1042 case Err_Overlap:
1043 case Err_Cond:
1044 return True;
1046 case Err_FishyValue:
1047 return VG_STREQ(extra1->Err.FishyValue.function_name,
1048 extra2->Err.FishyValue.function_name) &&
1049 VG_STREQ(extra1->Err.FishyValue.argument_name,
1050 extra2->Err.FishyValue.argument_name);
1052 case Err_Addr:
1053 return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
1054 ? True : False );
1056 case Err_Value:
1057 return ( extra1->Err.Value.szB == extra2->Err.Value.szB
1058 ? True : False );
1060 case Err_Leak:
1061 VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
1062 "since it's handled with VG_(unique_error)()!");
1064 default:
1065 VG_(printf)("Error:\n unknown error code %d\n",
1066 VG_(get_error_kind)(e1));
1067 VG_(tool_panic)("unknown error code in mc_eq_Error");
/* Functions used when searching MC_Chunk lists */
static
Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
{
   /* True iff a lies in mc's payload [data, data+szB), widened on both
      sides by the default malloc redzone. */
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 MC_(Malloc_Redzone_SzB) );
}
static
Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
{
   /* As above, but with a caller-supplied redzone size (used for
      mempool chunks, which carry their own redzone width). */
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 rzB );
}
1085 // Forward declarations
1086 static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
1087 static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
1088 AddrInfo* ai );
/* Describe an address as best you can, for error messages,
   putting the result in ai.  The search order is significant:
   client-named blocks, then non-meta mempool chunks, then live
   malloc'd blocks, then recently-freed blocks, then meta mempool
   chunks, and finally a non-heap description from the core. */
static void describe_addr ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
{
   MC_Chunk* mc;

   /* Caller must hand us a fresh, undescribed AddrInfo. */
   tl_assert(Addr_Undescribed == ai->tag);

   /* -- Perhaps it's a user-named block? -- */
   if (client_block_maybe_describe( a, ai )) {
      return;
   }

   /* -- Perhaps it's in mempool block (non-meta)? -- */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ False, ai)) {
      return;
   }

   /* Blocks allocated by memcheck malloc functions are either
      on the recently freed list or on the malloc-ed list.
      Custom blocks can be on both : a recently freed block might
      have been just re-allocated.
      So, first search the malloc-ed block, as the most recent
      block is the probable cause of error.
      We however detect and report that this is a recently re-allocated
      block. */
   /* -- Search for a currently malloc'd block which might bracket it. -- */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (!MC_(is_mempool_block)(mc) &&
          addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         /* If a freed block also brackets a, this live block must be a
            re-allocation of it -- say so in the description. */
         if (MC_(get_freed_block_bracketting)( a ))
            ai->Addr.Block.block_desc = "recently re-allocated block";
         else
            ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB = mc->szB;
         /* Signed offset: negative when a falls in the leading redzone. */
         ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
         ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
         VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
         ai->Addr.Block.freed_at = MC_(freed_at)(mc);
         return;
      }
   }

   /* -- Search for a recently freed block which might bracket it. -- */
   mc = MC_(get_freed_block_bracketting)( a );
   if (mc) {
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Freed;
      ai->Addr.Block.block_desc = "block";
      ai->Addr.Block.block_szB = mc->szB;
      ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
      ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
      VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
      ai->Addr.Block.freed_at = MC_(freed_at)(mc);
      return;
   }

   /* -- Perhaps it's in a meta mempool block? -- */
   /* This test is done last, because metapool blocks overlap with blocks
      handed out to the application. That makes every heap address part of
      a metapool block, so the interesting cases are handled first.
      This final search is a last-ditch attempt. When found, it is probably
      an error in the custom allocator itself. */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ True, ai )) {
      return;
   }

   /* No block found. Search a non-heap block description. */
   VG_(describe_addr) (ep, a, ai);
}
/* Describe address a (under debuginfo epoch ep), print the resulting
   description, then release any resources the description acquired. */
void MC_(pp_describe_addr) ( DiEpoch ep, Addr a )
{
   AddrInfo ai;

   ai.tag = Addr_Undescribed;
   describe_addr (ep, a, &ai);
   VG_(pp_addrinfo_mc) (a, &ai, /* maybe_gcc */ False);
   VG_(clear_addrinfo) (&ai);
}
1174 /* Fill in *origin_ec as specified by otag, or NULL it out if otag
1175 does not refer to a known origin. */
1176 static void update_origin ( /*OUT*/ExeContext** origin_ec,
1177 UInt otag )
1179 UInt ecu = otag & ~3;
1180 *origin_ec = NULL;
1181 if (VG_(is_plausible_ECU)(ecu)) {
1182 *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
/* Updates the copy with address info if necessary (but not for all errors).
   Called by the core once an error has been copied for recording: fills
   in the address description and/or origin ExeContext in the copied
   MC_Error.  Always returns sizeof(MC_Error), the number of 'extra'
   bytes the core should keep. */
UInt MC_(update_Error_extra)( const Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);
   DiEpoch ep = VG_(get_ExeContext_epoch)(VG_(get_error_where)(err));

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   //case Err_Value:
   //case Err_Cond:
   case Err_Overlap:
   case Err_FishyValue:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)() so they 'extra' not copied.  But
   // we make it consistent with the others.
   case Err_Leak:
      return sizeof(MC_Error);

   // For value errors, get the ExeContext corresponding to the
   // origin tag.  Note that it is a kludge to assume that
   // a length-1 trace indicates a stack origin.  FIXME.
   case Err_Value:
      update_origin( &extra->Err.Value.origin_ec,
                     extra->Err.Value.otag );
      return sizeof(MC_Error);
   case Err_Cond:
      update_origin( &extra->Err.Cond.origin_ec,
                     extra->Err.Cond.otag );
      return sizeof(MC_Error);
   case Err_RegParam:
      update_origin( &extra->Err.RegParam.origin_ec,
                     extra->Err.RegParam.otag );
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Addr.ai );
      return sizeof(MC_Error);
   case Err_MemParam:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.MemParam.ai );
      update_origin( &extra->Err.MemParam.origin_ec,
                     extra->Err.MemParam.otag );
      return sizeof(MC_Error);
   case Err_Jump:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Jump.ai );
      return sizeof(MC_Error);
   case Err_User:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.User.ai );
      update_origin( &extra->Err.User.origin_ec,
                     extra->Err.User.otag );
      return sizeof(MC_Error);
   case Err_Free:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Free.ai );
      return sizeof(MC_Error);
   case Err_IllegalMempool:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      (void)client_block_maybe_describe( VG_(get_error_address)(err),
                                         &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }

   case Err_ReallocSizeZero:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.ReallocSizeZero.ai );
      return sizeof(MC_Error);

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
1273 static Bool client_block_maybe_describe( Addr a,
1274 /*OUT*/AddrInfo* ai )
1276 UWord i;
1277 CGenBlock* cgbs = NULL;
1278 UWord cgb_used = 0;
1280 MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
1281 if (cgbs == NULL)
1282 tl_assert(cgb_used == 0);
1284 /* Perhaps it's a general block ? */
1285 for (i = 0; i < cgb_used; i++) {
1286 if (cgbs[i].start == 0 && cgbs[i].size == 0)
1287 continue;
1288 // Use zero as the redzone for client blocks.
1289 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
1290 ai->tag = Addr_Block;
1291 ai->Addr.Block.block_kind = Block_UserG;
1292 ai->Addr.Block.block_desc = cgbs[i].desc;
1293 ai->Addr.Block.block_szB = cgbs[i].size;
1294 ai->Addr.Block.rwoffset = (Word)(a) - (Word)(cgbs[i].start);
1295 ai->Addr.Block.allocated_at = cgbs[i].where;
1296 VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
1297 ai->Addr.Block.freed_at = VG_(null_ExeContext)();;
1298 return True;
1301 return False;
1305 static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
1306 /*OUT*/AddrInfo* ai )
1308 MC_Mempool* mp;
1309 tl_assert( MC_(mempool_list) );
1311 VG_(HT_ResetIter)( MC_(mempool_list) );
1312 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
1313 if (mp->chunks != NULL && mp->metapool == is_metapool) {
1314 MC_Chunk* mc;
1315 VG_(HT_ResetIter)(mp->chunks);
1316 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
1317 if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
1318 ai->tag = Addr_Block;
1319 ai->Addr.Block.block_kind = Block_MempoolChunk;
1320 ai->Addr.Block.block_desc = "block";
1321 ai->Addr.Block.block_szB = mc->szB;
1322 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1323 ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
1324 VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
1325 ai->Addr.Block.freed_at = MC_(freed_at)(mc);
1326 return True;
1331 return False;
1335 /*------------------------------------------------------------*/
1336 /*--- Suppressions ---*/
1337 /*------------------------------------------------------------*/
/* The kinds of suppression Memcheck recognises in suppression files.
   The names users write ("Addr4", "Leak", ...) are mapped to these by
   MC_(is_recognised_suppression). */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp, Value32Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp, Addr32Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
      FishyValueSupp,// Fishy value suppression.
      ReallocSizeZeroSupp, // realloc size 0 suppression
   }
   MC_SuppKind;
1364 Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su )
1366 SuppKind skind;
1368 if (VG_STREQ(name, "Param")) skind = ParamSupp;
1369 else if (VG_STREQ(name, "User")) skind = UserSupp;
1370 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
1371 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
1372 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
1373 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
1374 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
1375 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
1376 else if (VG_STREQ(name, "Addr32")) skind = Addr32Supp;
1377 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
1378 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
1379 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
1380 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
1381 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
1382 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
1383 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
1384 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1385 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1386 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1387 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1388 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1389 else if (VG_STREQ(name, "Value32")) skind = Value32Supp;
1390 else if (VG_STREQ(name, "FishyValue")) skind = FishyValueSupp;
1391 else if (VG_STREQ(name, "ReallocZero")) skind = ReallocSizeZeroSupp;
1392 else
1393 return False;
1395 VG_(set_supp_kind)(su, skind);
1396 return True;
/* Extra state attached to a Leak suppression (VG_(set_supp_extra)). */
typedef struct _MC_LeakSuppExtra MC_LeakSuppExtra;

struct _MC_LeakSuppExtra {
   UInt match_leak_kinds;  /* set of leak kinds this suppression matches;
                              defaults to all (see read_extra_suppression_info) */

   /* Maintains nr of blocks and bytes suppressed with this suppression
      during the leak search identified by leak_search_gen.
      blocks_suppressed and bytes_suppressed are reset to 0 when
      used the first time during a leak search. */
   SizeT blocks_suppressed;
   SizeT bytes_suppressed;
   UInt leak_search_gen;
};

/* Extra state attached to a FishyValue suppression: the function and
   argument names parsed from "function_name(argument_name)". */
typedef struct {
   const HChar *function_name;
   const HChar *argument_name;
} MC_FishyValueExtra;
/* Read the extra, kind-specific lines of a suppression entry from fd.
   Returns True if the entry was read successfully, False on malformed
   input (or, for Param suppressions, premature EOF where the extra
   line is mandatory). */
Bool MC_(read_extra_suppression_info) ( Int fd, HChar** bufpp,
                                        SizeT* nBufp, Int* lineno, Supp *su )
{
   Bool eof;
   Int i;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      /* Param suppressions carry one mandatory extra line: the
         syscall-parameter name. */
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
      if (VG_(strcmp) (*bufpp, "preadv(vector[...])") == 0
          || VG_(strcmp) (*bufpp, "pwritev(vector[...])") == 0) {
         /* Report the incompatible change introduced in 3.15
            when reading a unsupported 3.14 or before entry.
            See bug 417075. */
         VG_(umsg)("WARNING: %s is an obsolete suppression line "
                   "not supported in valgrind 3.15 or later.\n"
                   "You should replace [...] by a specific index"
                   " such as [0] or [1] or [2] or similar\n\n", *bufpp);
      }
   } else if (VG_(get_supp_kind)(su) == LeakSupp) {
      // We might have the optional match-leak-kinds line
      MC_LeakSuppExtra* lse;
      lse = VG_(malloc)("mc.resi.2", sizeof(MC_LeakSuppExtra));
      lse->match_leak_kinds = MC_(all_Reachedness)();
      lse->blocks_suppressed = 0;
      lse->bytes_suppressed = 0;
      lse->leak_search_gen = 0;
      VG_(set_supp_extra)(su, lse); // By default, all kinds will match.
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True; // old LeakSupp style, no match-leak-kinds line.
      if (0 == VG_(strncmp)(*bufpp, "match-leak-kinds:", 17)) {
         /* Skip whitespace between the keyword and the kind set. */
         i = 17;
         while ((*bufpp)[i] && VG_(isspace)((*bufpp)[i]))
            i++;
         if (!VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
                                  True/*allow_all*/,
                                  (*bufpp)+i, &lse->match_leak_kinds)) {
            return False;
         }
      } else {
         return False; // unknown extra line.
      }
   } else if (VG_(get_supp_kind)(su) == FishyValueSupp) {
      MC_FishyValueExtra *extra;
      HChar *p, *function_name, *argument_name = NULL;

      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True;

      // The suppression string is: function_name(argument_name)
      function_name = VG_(strdup)("mv.resi.4", *bufpp);
      p = VG_(strchr)(function_name, '(');
      if (p != NULL) {
         /* Split in place: terminate the function name, then look for
            the closing ')' and terminate the argument name. */
         *p++ = '\0';
         argument_name = p;
         p = VG_(strchr)(p, ')');
         if (p != NULL)
            *p = '\0';
      }
      if (p == NULL) { // malformed suppression string
         VG_(free)(function_name);
         return False;
      }

      /* extra keeps pointers into the function_name allocation, which
         it now owns. */
      extra = VG_(malloc)("mc.resi.3", sizeof *extra);
      extra->function_name = function_name;
      extra->argument_name = argument_name;

      VG_(set_supp_extra)(su, extra);
   }
   return True;
}
/* Decide whether error err is matched by suppression su.  The core has
   already matched the call stacks; this checks the suppression kind
   against the error kind plus any kind-specific extra data. */
Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   Int su_szB;
   MC_Error* extra = VG_(get_error_extra)(err);
   ErrorKind ekind = VG_(get_error_kind)(err);

   switch (VG_(get_supp_kind)(su)) {
   case ParamSupp:
      return ((ekind == Err_RegParam || ekind == Err_MemParam)
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

   case UserSupp:
      return (ekind == Err_User);

   case CoreMemSupp:
      return (ekind == Err_CoreMem
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

   /* ValueN suppressions match value errors of exactly size N. */
   case Value1Supp: su_szB = 1; goto value_case;
   case Value2Supp: su_szB = 2; goto value_case;
   case Value4Supp: su_szB = 4; goto value_case;
   case Value8Supp: su_szB = 8; goto value_case;
   case Value16Supp:su_szB =16; goto value_case;
   case Value32Supp:su_szB =32; goto value_case;
   value_case:
      return (ekind == Err_Value && extra->Err.Value.szB == su_szB);

   case CondSupp:
      return (ekind == Err_Cond);

   /* AddrN suppressions match address errors of exactly size N. */
   case Addr1Supp: su_szB = 1; goto addr_case;
   case Addr2Supp: su_szB = 2; goto addr_case;
   case Addr4Supp: su_szB = 4; goto addr_case;
   case Addr8Supp: su_szB = 8; goto addr_case;
   case Addr16Supp:su_szB =16; goto addr_case;
   case Addr32Supp:su_szB =32; goto addr_case;
   addr_case:
      return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);

   case JumpSupp:
      return (ekind == Err_Jump);

   case FreeSupp:
      /* A "Free" suppression covers invalid frees and mismatched frees. */
      return (ekind == Err_Free || ekind == Err_FreeMismatch);

   case OverlapSupp:
      return (ekind == Err_Overlap);

   case LeakSupp:
      if (ekind == Err_Leak) {
         MC_LeakSuppExtra* lse = (MC_LeakSuppExtra*) VG_(get_supp_extra)(su);
         if (lse->leak_search_gen != MC_(leak_search_gen)) {
            // First time we see this suppression during this leak search.
            // => reset the counters to 0.
            lse->blocks_suppressed = 0;
            lse->bytes_suppressed = 0;
            lse->leak_search_gen = MC_(leak_search_gen);
         }
         /* Match only if the leak's reachedness is in the suppression's
            requested set. */
         return RiS(extra->Err.Leak.lr->key.state, lse->match_leak_kinds);
      } else
         return False;

   case MempoolSupp:
      return (ekind == Err_IllegalMempool);

   case FishyValueSupp: {
      MC_FishyValueExtra *supp_extra = VG_(get_supp_extra)(su);

      return (ekind == Err_FishyValue) &&
             VG_STREQ(extra->Err.FishyValue.function_name,
                      supp_extra->function_name) &&
             VG_STREQ(extra->Err.FishyValue.argument_name,
                      supp_extra->argument_name);
   }

   case ReallocSizeZeroSupp: {
      return (ekind == Err_ReallocSizeZero);
   }

   default:
      VG_(printf)("Error:\n"
                  " unknown suppression type %d\n",
                  VG_(get_supp_kind)(su));
      VG_(tool_panic)("unknown suppression type in "
                      "MC_(error_matches_suppression)");
   }
}
/* Return the suppression-kind name for err, i.e. the string a user
   would write in a suppression file to suppress it.  For Addr and
   Value errors the name encodes the access size. */
const HChar* MC_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   case Err_RegParam:       return "Param";
   case Err_MemParam:       return "Param";
   case Err_User:           return "User";
   case Err_FreeMismatch:   return "Free";
   case Err_IllegalMempool: return "Mempool";
   case Err_Free:           return "Free";
   case Err_Jump:           return "Jump";
   case Err_CoreMem:        return "CoreMem";
   case Err_Overlap:        return "Overlap";
   case Err_Leak:           return "Leak";
   case Err_Cond:           return "Cond";
   case Err_FishyValue:     return "FishyValue";
   case Err_ReallocSizeZero: return "ReallocZero";
   case Err_Addr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Addr.szB ) {
      case 1:               return "Addr1";
      case 2:               return "Addr2";
      case 4:               return "Addr4";
      case 8:               return "Addr8";
      case 16:              return "Addr16";
      case 32:              return "Addr32";
      default:              VG_(tool_panic)("unexpected size for Addr");
      }
   }
   case Err_Value: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Value.szB ) {
      case 1:               return "Value1";
      case 2:               return "Value2";
      case 4:               return "Value4";
      case 8:               return "Value8";
      case 16:              return "Value16";
      case 32:              return "Value32";
      default:              VG_(tool_panic)("unexpected size for Value");
      }
   }
   default:                 VG_(tool_panic)("get_error_name: unexpected type");
   }
}
1627 SizeT MC_(get_extra_suppression_info) ( const Error* err,
1628 /*OUT*/HChar* buf, Int nBuf )
1630 ErrorKind ekind = VG_(get_error_kind)(err);
1631 tl_assert(buf);
1632 tl_assert(nBuf >= 1);
1634 if (Err_RegParam == ekind || Err_MemParam == ekind) {
1635 const HChar* errstr = VG_(get_error_string)(err);
1636 tl_assert(errstr);
1637 return VG_(snprintf)(buf, nBuf, "%s", errstr);
1638 } else if (Err_Leak == ekind) {
1639 MC_Error* extra = VG_(get_error_extra)(err);
1640 return VG_(snprintf) (buf, nBuf, "match-leak-kinds: %s",
1641 pp_Reachedness_for_leak_kinds(extra->Err.Leak.lr->key.state));
1642 } else if (Err_FishyValue == ekind) {
1643 MC_Error* extra = VG_(get_error_extra)(err);
1644 return VG_(snprintf) (buf, nBuf, "%s(%s)",
1645 extra->Err.FishyValue.function_name,
1646 extra->Err.FishyValue.argument_name);
1647 } else {
1648 buf[0] = '\0';
1649 return 0;
1653 SizeT MC_(print_extra_suppression_use) ( const Supp *su,
1654 /*OUT*/HChar *buf, Int nBuf )
1656 tl_assert(nBuf >= 1);
1658 if (VG_(get_supp_kind)(su) == LeakSupp) {
1659 MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
1661 if (lse->leak_search_gen == MC_(leak_search_gen)
1662 && lse->blocks_suppressed > 0) {
1663 return VG_(snprintf) (buf, nBuf,
1664 "suppressed: %'lu bytes in %'lu blocks",
1665 lse->bytes_suppressed,
1666 lse->blocks_suppressed);
1670 buf[0] = '\0';
1671 return 0;
1674 void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su)
1676 if (VG_(get_supp_kind)(su) == LeakSupp) {
1677 MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
1678 MC_Error* extra = VG_(get_error_extra)(err);
1680 tl_assert (lse->leak_search_gen == MC_(leak_search_gen));
1681 lse->blocks_suppressed += extra->Err.Leak.lr->num_blocks;
1682 lse->bytes_suppressed
1683 += extra->Err.Leak.lr->szB + extra->Err.Leak.lr->indirect_szB;
1687 /*--------------------------------------------------------------------*/
1688 /*--- end mc_errors.c ---*/
1689 /*--------------------------------------------------------------------*/