/*--------------------------------------------------------------------*/
/*--- A header file for all parts of the MemCheck tool.           ---*/
/*---                                                 mc_include.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
30 #ifndef __MC_INCLUDE_H
31 #define __MC_INCLUDE_H
33 #define MC_(str) VGAPPEND(vgMemCheck_,str)
36 /* This is a private header file for use only within the
37 memcheck/ directory. */
39 /*------------------------------------------------------------*/
40 /*--- Tracking the heap ---*/
41 /*------------------------------------------------------------*/
43 /* By default, we want at least a 16B redzone on client heap blocks
45 The default can be modified by --redzone-size. */
46 #define MC_MALLOC_DEFAULT_REDZONE_SZB 16
47 // effective redzone, as (possibly) modified by --redzone-size:
48 extern SizeT
MC_(Malloc_Redzone_SzB
);
50 /* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. */
60 /* This describes a heap block. Nb: first two fields must match core's
64 struct _MC_Chunk
* next
;
65 Addr data
; // Address of the actual block.
66 SizeT szB
: (sizeof(SizeT
)*8)-2; // Size requested; 30 or 62 bits.
67 MC_AllocKind allockind
: 2; // Which operation did the allocation.
69 /* Variable-length array. The size depends on MC_(clo_keep_stacktraces).
70 This array optionally stores the alloc and/or free stack trace. */
74 /* Returns the execontext where the MC_Chunk was allocated/freed.
75 Returns VG_(null_ExeContext)() if the execontext has not been recorded (due
76 to MC_(clo_keep_stacktraces) and/or because block not yet freed). */
77 ExeContext
* MC_(allocated_at
) (MC_Chunk
*);
78 ExeContext
* MC_(freed_at
) (MC_Chunk
*);
80 /* Records and sets execontext according to MC_(clo_keep_stacktraces) */
81 void MC_(set_allocated_at
) (ThreadId
, MC_Chunk
*);
82 void MC_(set_freed_at
) (ThreadId
, MC_Chunk
*);
84 /* number of pointers needed according to MC_(clo_keep_stacktraces). */
85 UInt
MC_(n_where_pointers
) (void);
87 /* Memory pool. Nb: first two fields must match core's VgHashNode. */
90 struct _MC_Mempool
* next
;
91 Addr pool
; // pool identifier
92 SizeT rzB
; // pool red-zone size
93 Bool is_zeroed
; // allocations from this pool are zeroed
94 Bool auto_free
; // De-alloc block frees all chunks in block
95 Bool metapool
; // These chunks are VALGRIND_MALLOC_LIKE
96 // memory, and used as pool.
97 VgHashTable
*chunks
; // chunks associated with this pool
102 void* MC_(new_block
) ( ThreadId tid
,
103 Addr p
, SizeT size
, SizeT align
,
104 Bool is_zeroed
, MC_AllocKind kind
,
106 void MC_(handle_free
) ( ThreadId tid
,
107 Addr p
, UInt rzB
, MC_AllocKind kind
);
109 void MC_(create_mempool
) ( Addr pool
, UInt rzB
, Bool is_zeroed
,
110 Bool auto_free
, Bool metapool
);
111 void MC_(destroy_mempool
) ( Addr pool
);
112 void MC_(mempool_alloc
) ( ThreadId tid
, Addr pool
,
113 Addr addr
, SizeT size
);
114 void MC_(mempool_free
) ( Addr pool
, Addr addr
);
115 void MC_(mempool_trim
) ( Addr pool
, Addr addr
, SizeT size
);
116 void MC_(move_mempool
) ( Addr poolA
, Addr poolB
);
117 void MC_(mempool_change
) ( Addr pool
, Addr addrA
, Addr addrB
, SizeT size
);
118 Bool
MC_(mempool_exists
) ( Addr pool
);
119 Bool
MC_(is_mempool_block
)( MC_Chunk
* mc_search
);
121 /* Searches for a recently freed block which might bracket Addr a.
122 Return the MC_Chunk* for this block or NULL if no bracketting block
124 MC_Chunk
* MC_(get_freed_block_bracketting
)( Addr a
);
126 /* For efficient pooled alloc/free of the MC_Chunk. */
127 extern PoolAlloc
* MC_(chunk_poolalloc
);
129 /* For tracking malloc'd blocks. Nb: it's quite important that it's a
130 VgHashTable, because VgHashTable allows duplicate keys without complaint.
131 This can occur if a user marks a malloc() block as also a custom block with
133 extern VgHashTable
*MC_(malloc_list
);
135 /* For tracking memory pools. */
136 extern VgHashTable
*MC_(mempool_list
);
138 /* Shadow memory functions */
139 Bool
MC_(check_mem_is_noaccess
)( Addr a
, SizeT len
, Addr
* bad_addr
);
140 void MC_(make_mem_noaccess
) ( Addr a
, SizeT len
);
141 void MC_(make_mem_undefined_w_otag
)( Addr a
, SizeT len
, UInt otag
);
142 void MC_(make_mem_defined
) ( Addr a
, SizeT len
);
143 void MC_(copy_address_range_state
) ( Addr src
, Addr dst
, SizeT len
);
145 void MC_(xtmemory_report
) ( const HChar
* filename
, Bool fini
);
147 void MC_(print_malloc_stats
) ( void );
148 /* nr of free operations done */
149 SizeT
MC_(get_cmalloc_n_frees
) ( void );
151 void* MC_(malloc
) ( ThreadId tid
, SizeT n
);
152 void* MC_(__builtin_new
) ( ThreadId tid
, SizeT n
);
153 void* MC_(__builtin_new_aligned
)( ThreadId tid
, SizeT n
, SizeT alignB
);
154 void* MC_(__builtin_vec_new
) ( ThreadId tid
, SizeT n
);
155 void* MC_(__builtin_vec_new_aligned
) ( ThreadId tid
, SizeT n
, SizeT alignB
);
156 void* MC_(memalign
) ( ThreadId tid
, SizeT align
, SizeT n
);
157 void* MC_(calloc
) ( ThreadId tid
, SizeT nmemb
, SizeT size1
);
158 void MC_(free
) ( ThreadId tid
, void* p
);
159 void MC_(__builtin_delete
) ( ThreadId tid
, void* p
);
160 void MC_(__builtin_delete_aligned
) ( ThreadId tid
, void* p
, SizeT alignB
);
161 void MC_(__builtin_vec_delete
) ( ThreadId tid
, void* p
);
162 void MC_(__builtin_vec_delete_aligned
) ( ThreadId tid
, void* p
, SizeT alignB
);
163 void* MC_(realloc
) ( ThreadId tid
, void* p
, SizeT new_size
);
164 SizeT
MC_(malloc_usable_size
) ( ThreadId tid
, void* p
);
166 void MC_(handle_resizeInPlace
)(ThreadId tid
, Addr p
,
167 SizeT oldSizeB
, SizeT newSizeB
, SizeT rzB
);
170 /*------------------------------------------------------------*/
171 /*--- Origin tracking translate-time support ---*/
172 /*------------------------------------------------------------*/
174 /* See detailed comments in mc_machine.c. */
175 Int
MC_(get_otrack_shadow_offset
) ( Int offset
, Int szB
);
176 IRType
MC_(get_otrack_reg_array_equiv_int_type
) ( IRRegArray
* arr
);
/* Constants which are used as the lowest 2 bits in origin tags.

   An origin tag comprises an upper 30-bit ECU field and a lower 2-bit
   'kind' field.  The ECU field is a number given out by m_execontext
   and has a 1-1 mapping with ExeContext*s.  An ECU can be used
   directly as an origin tag (otag), but in fact we want to put
   additional information 'kind' field to indicate roughly where the
   tag came from.  This helps print more understandable error messages
   for the user -- it has no other purpose.

   Hence the following 2-bit constants are needed for 'kind' field.

   Further notes:

   * Both ECUs and origin tags are represented as 32-bit words.

   * m_execontext and the core-tool interface deal purely in ECUs.
     They have no knowledge of origin tags - that is a purely
     Memcheck-internal matter.

   * all valid ECUs have the lowest 2 bits zero and at least
     one of the upper 30 bits nonzero (see VG_(is_plausible_ECU))

   * to convert from an ECU to an otag, OR in one of the MC_OKIND_
     constants below

   * to convert an otag back to an ECU, AND it with ~3
*/
#define MC_OKIND_UNKNOWN  0  /* unknown origin */
#define MC_OKIND_HEAP     1  /* this is a heap origin */
#define MC_OKIND_STACK    2  /* this is a stack origin */
#define MC_OKIND_USER     3  /* arises from user-supplied client req */
213 /*------------------------------------------------------------*/
214 /*--- Profiling of memory events ---*/
215 /*------------------------------------------------------------*/
217 /* Define to collect detailed performance info. */
218 /* #define MC_PROFILE_MEMORY */
219 #ifdef MC_PROFILE_MEMORY
221 /* Order of enumerators does not matter. But MCPE_LAST has to be the
222 last entry in the list as it is used as an array bound. */
236 MCPE_LOADV_128_OR_256
,
237 MCPE_LOADV_128_OR_256_SLOW_LOOP
,
238 MCPE_LOADV_128_OR_256_SLOW1
,
239 MCPE_LOADV_128_OR_256_SLOW2
,
241 MCPE_LOADVN_SLOW_LOOP
,
263 MCPE_STOREVN_SLOW_LOOP
,
264 MCPE_MAKE_ALIGNED_WORD32_UNDEFINED
,
265 MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW
,
266 MCPE_MAKE_ALIGNED_WORD64_UNDEFINED
,
267 MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW
,
268 MCPE_MAKE_ALIGNED_WORD32_NOACCESS
,
269 MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW
,
270 MCPE_MAKE_ALIGNED_WORD64_NOACCESS
,
271 MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW
,
272 MCPE_MAKE_MEM_NOACCESS
,
273 MCPE_MAKE_MEM_UNDEFINED
,
274 MCPE_MAKE_MEM_UNDEFINED_W_OTAG
,
275 MCPE_MAKE_MEM_DEFINED
,
276 MCPE_CHEAP_SANITY_CHECK
,
277 MCPE_EXPENSIVE_SANITY_CHECK
,
278 MCPE_COPY_ADDRESS_RANGE_STATE
,
279 MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1
,
280 MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2
,
281 MCPE_CHECK_MEM_IS_NOACCESS
,
282 MCPE_CHECK_MEM_IS_NOACCESS_LOOP
,
283 MCPE_IS_MEM_ADDRESSABLE
,
284 MCPE_IS_MEM_ADDRESSABLE_LOOP
,
286 MCPE_IS_MEM_DEFINED_LOOP
,
287 MCPE_IS_MEM_DEFINED_COMPREHENSIVE
,
288 MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP
,
289 MCPE_IS_DEFINED_ASCIIZ
,
290 MCPE_IS_DEFINED_ASCIIZ_LOOP
,
291 MCPE_FIND_CHUNK_FOR_OLD
,
292 MCPE_FIND_CHUNK_FOR_OLD_LOOP
,
293 MCPE_SET_ADDRESS_RANGE_PERMS
,
294 MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP
,
295 MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP
,
296 MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS
,
297 MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1
,
298 MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2
,
299 MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK
,
300 MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK
,
301 MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A
,
302 MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B
,
303 MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C
,
304 MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A
,
305 MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B
,
306 MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K
,
307 MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM
,
309 MCPE_NEW_MEM_STACK_4
,
310 MCPE_NEW_MEM_STACK_8
,
311 MCPE_NEW_MEM_STACK_12
,
312 MCPE_NEW_MEM_STACK_16
,
313 MCPE_NEW_MEM_STACK_32
,
314 MCPE_NEW_MEM_STACK_112
,
315 MCPE_NEW_MEM_STACK_128
,
316 MCPE_NEW_MEM_STACK_144
,
317 MCPE_NEW_MEM_STACK_160
,
319 MCPE_DIE_MEM_STACK_4
,
320 MCPE_DIE_MEM_STACK_8
,
321 MCPE_DIE_MEM_STACK_12
,
322 MCPE_DIE_MEM_STACK_16
,
323 MCPE_DIE_MEM_STACK_32
,
324 MCPE_DIE_MEM_STACK_112
,
325 MCPE_DIE_MEM_STACK_128
,
326 MCPE_DIE_MEM_STACK_144
,
327 MCPE_DIE_MEM_STACK_160
,
328 MCPE_MAKE_STACK_UNINIT_W_O
,
329 MCPE_MAKE_STACK_UNINIT_NO_O
,
330 MCPE_MAKE_STACK_UNINIT_128_NO_O
,
331 MCPE_MAKE_STACK_UNINIT_128_NO_O_ALIGNED_16
,
332 MCPE_MAKE_STACK_UNINIT_128_NO_O_ALIGNED_8
,
333 MCPE_MAKE_STACK_UNINIT_128_NO_O_SLOWCASE
,
334 /* Do not add enumerators past this line. */
338 extern ULong
MC_(event_ctr
)[MCPE_LAST
];
340 # define PROF_EVENT(ev) \
341 do { tl_assert((ev) >= 0 && (ev) < MCPE_LAST); \
342 MC_(event_ctr)[ev]++; \
347 # define PROF_EVENT(ev) /* */
349 #endif /* MC_PROFILE_MEMORY */
352 /*------------------------------------------------------------*/
353 /*--- V and A bits (Victoria & Albert ?) ---*/
354 /*------------------------------------------------------------*/
356 /* The number of entries in the primary map can be altered. However
357 we hardwire the assumption that each secondary map covers precisely
358 64k of address space. */
359 #define SM_SIZE 65536 /* DO NOT CHANGE */
360 #define SM_MASK (SM_SIZE-1) /* DO NOT CHANGE */
362 #define V_BIT_DEFINED 0
363 #define V_BIT_UNDEFINED 1
365 #define V_BITS8_DEFINED 0
366 #define V_BITS8_UNDEFINED 0xFF
368 #define V_BITS16_DEFINED 0
369 #define V_BITS16_UNDEFINED 0xFFFF
371 #define V_BITS32_DEFINED 0
372 #define V_BITS32_UNDEFINED 0xFFFFFFFF
374 #define V_BITS64_DEFINED 0ULL
375 #define V_BITS64_UNDEFINED 0xFFFFFFFFFFFFFFFFULL
377 /* Set to 1 to enable handwritten assembly helpers on targets for
378 which it is supported. */
379 #define ENABLE_ASSEMBLY_HELPERS 1
381 /* Comment the below to disable the fast case LOADV */
382 #define PERF_FAST_LOADV 1
384 /*------------------------------------------------------------*/
385 /*--- Leak checking ---*/
386 /*------------------------------------------------------------*/
390 // Nb: the order is important -- it dictates the order of loss records
392 Reachable
=0, // Definitely reachable from root-set.
393 Possible
=1, // Possibly reachable from root-set; involves at
394 // least one interior-pointer along the way.
395 IndirectLeak
=2, // Leaked, but reachable from another leaked block
396 // (be it Unreached or IndirectLeak).
397 Unreached
=3 // Not reached, ie. leaked.
398 // (At best, only reachable from itself via a cycle.)
402 // Build mask to check or set Reachedness r membership
403 #define R2S(r) (1 << (r))
404 // Reachedness r is member of the Set s ?
405 #define RiS(r,s) ((s) & R2S(r))
406 // Returns a set containing all Reachedness
407 UInt
MC_(all_Reachedness
)(void);
409 /* For VALGRIND_COUNT_LEAKS client request */
410 extern SizeT
MC_(bytes_leaked
);
411 extern SizeT
MC_(bytes_indirect
);
412 extern SizeT
MC_(bytes_dubious
);
413 extern SizeT
MC_(bytes_reachable
);
414 extern SizeT
MC_(bytes_suppressed
);
416 /* For VALGRIND_COUNT_LEAK_BLOCKS client request */
417 extern SizeT
MC_(blocks_leaked
);
418 extern SizeT
MC_(blocks_indirect
);
419 extern SizeT
MC_(blocks_dubious
);
420 extern SizeT
MC_(blocks_reachable
);
421 extern SizeT
MC_(blocks_suppressed
);
433 LCD_Any
, // Output all loss records, whatever the delta.
434 LCD_Increased
, // Output loss records with an increase in size or blocks.
435 LCD_Changed
, // Output loss records with an increase or
436 // decrease in size or blocks.
437 LCD_New
// Output new loss records.
441 /* When a LossRecord is put into an OSet, these elements represent the key. */
443 struct _LossRecordKey
{
444 Reachedness state
; // LC_Extra.state value shared by all blocks.
445 ExeContext
* allocated_at
; // Where they were allocated.
449 /* A loss record, used for generating err msgs. Multiple leaked blocks can be
450 * merged into a single loss record if they have the same state and similar
451 * enough allocation points (controlled by --leak-resolution). */
454 LossRecordKey key
; // Key, when used in an OSet.
455 SizeT szB
; // Sum of all MC_Chunk.szB values.
456 SizeT indirect_szB
; // Sum of all LC_Extra.indirect_szB values.
457 UInt num_blocks
; // Number of blocks represented by the record.
458 UInt old_num_blocks
; // output only the changed/new loss records
459 SizeT old_szB
; // old_* values are the values found during the
460 SizeT old_indirect_szB
; // previous leak search. old_* values are used to
465 struct _LeakCheckParams
{
467 UInt show_leak_kinds
;
468 UInt errors_for_leak_kinds
;
470 LeakCheckDeltaMode deltamode
;
471 UInt max_loss_records_output
; // limit on the nr of loss records output.
472 Bool requested_by_monitor_command
; // True when requested by gdb/vgdb.
473 const HChar
* xt_filename
; // if != NULL, produce an xtree leak file.
477 void MC_(detect_memory_leaks
) ( ThreadId tid
, LeakCheckParams
* lcp
);
479 // Each time a leak search is done, the leak search generation
480 // MC_(leak_search_gen) is incremented.
481 extern UInt
MC_(leak_search_gen
);
483 // maintains the lcp.deltamode given in the last call to detect_memory_leaks
484 extern LeakCheckDeltaMode
MC_(detect_memory_leaks_last_delta_mode
);
486 // prints the list of blocks corresponding to the given loss_record_nr slice
487 // (from/to) (up to maximum max_blocks)
488 // Returns True if loss_record_nr_from identifies a correct loss record
489 // from last leak search, returns False otherwise.
490 // Note that loss_record_nr_to can be bigger than the nr of loss records. All
491 // loss records after from will then be examined and maybe printed.
492 // If heuristics != 0, print only the loss records/blocks found via
493 // one of the heuristics in the set.
494 Bool
MC_(print_block_list
) ( UInt loss_record_nr_from
, UInt loss_record_nr_to
,
495 UInt max_blocks
, UInt heuristics
);
497 // Prints the addresses/registers/... at which a pointer to
498 // the given range [address, address+szB[ is found.
499 void MC_(who_points_at
) ( Addr address
, SizeT szB
);
501 // if delta_mode == LCD_Any, prints in buf an empty string
502 // otherwise prints a delta in the layout " (+%'lu)" or " (-%'lu)"
503 extern HChar
* MC_(snprintf_delta
) (HChar
* buf
, Int size
,
504 SizeT current_val
, SizeT old_val
,
505 LeakCheckDeltaMode delta_mode
);
508 Bool
MC_(is_valid_aligned_word
) ( Addr a
);
509 Bool
MC_(is_within_valid_secondary
) ( Addr a
);
511 // Prints as user msg a description of the given loss record.
512 void MC_(pp_LossRecord
)(UInt n_this_record
, UInt n_total_records
,
516 /*------------------------------------------------------------*/
517 /*--- Errors and suppressions ---*/
518 /*------------------------------------------------------------*/
520 /* Did we show to the user, any errors for which an uninitialised
521 value origin could have been collected (but wasn't) ? If yes,
522 then, at the end of the run, print a 1 line message advising that a
523 rerun with --track-origins=yes might help. */
524 extern Bool
MC_(any_value_errors
);
526 /* Standard functions for error and suppressions as required by the
528 Bool
MC_(eq_Error
) ( VgRes res
, const Error
* e1
, const Error
* e2
);
529 void MC_(before_pp_Error
) ( const Error
* err
);
530 void MC_(pp_Error
) ( const Error
* err
);
531 UInt
MC_(update_Error_extra
) ( const Error
* err
);
533 Bool
MC_(is_recognised_suppression
) ( const HChar
* name
, Supp
* su
);
535 Bool
MC_(read_extra_suppression_info
) ( Int fd
, HChar
** buf
,
536 SizeT
* nBuf
, Int
* lineno
, Supp
*su
);
538 Bool
MC_(error_matches_suppression
) ( const Error
* err
, const Supp
* su
);
540 SizeT
MC_(get_extra_suppression_info
) ( const Error
* err
,
541 /*OUT*/HChar
* buf
, Int nBuf
);
542 SizeT
MC_(print_extra_suppression_use
) ( const Supp
* su
,
543 /*OUT*/HChar
* buf
, Int nBuf
);
544 void MC_(update_extra_suppression_use
) ( const Error
* err
, const Supp
* su
);
546 const HChar
* MC_(get_error_name
) ( const Error
* err
);
548 /* Recording of errors */
549 void MC_(record_address_error
) ( ThreadId tid
, Addr a
, Int szB
,
551 void MC_(record_cond_error
) ( ThreadId tid
, UInt otag
);
552 void MC_(record_value_error
) ( ThreadId tid
, Int szB
, UInt otag
);
553 void MC_(record_jump_error
) ( ThreadId tid
, Addr a
);
555 void MC_(record_free_error
) ( ThreadId tid
, Addr a
);
556 void MC_(record_illegal_mempool_error
) ( ThreadId tid
, Addr a
);
557 void MC_(record_freemismatch_error
) ( ThreadId tid
, MC_Chunk
* mc
);
558 void MC_(record_realloc_size_zero
) ( ThreadId tid
, Addr a
);
560 void MC_(record_overlap_error
) ( ThreadId tid
, const HChar
* function
,
561 Addr src
, Addr dst
, SizeT szB
);
562 void MC_(record_core_mem_error
) ( ThreadId tid
, const HChar
* msg
);
563 void MC_(record_regparam_error
) ( ThreadId tid
, const HChar
* msg
, UInt otag
);
564 void MC_(record_memparam_error
) ( ThreadId tid
, Addr a
,
565 Bool isAddrErr
, const HChar
* msg
, UInt otag
);
566 void MC_(record_user_error
) ( ThreadId tid
, Addr a
,
567 Bool isAddrErr
, UInt otag
);
569 Bool
MC_(record_leak_error
) ( ThreadId tid
,
571 UInt n_total_records
,
572 LossRecord
* lossRecord
,
576 Bool
MC_(record_fishy_value_error
) ( ThreadId tid
, const HChar
* function
,
577 const HChar
*argument_name
, SizeT value
);
579 /* Leak kinds tokens to call VG_(parse_enum_set). */
580 extern const HChar
* MC_(parse_leak_kinds_tokens
);
582 /* prints a description of address a in the specified debuginfo epoch */
583 void MC_(pp_describe_addr
) ( DiEpoch ep
, Addr a
);
585 /* Is this address in a user-specified "ignored range" ? */
586 Bool
MC_(in_ignored_range
) ( Addr a
);
588 /* Is this address in a user-specified "ignored range of offsets below
589 the current thread's stack pointer?" */
590 Bool
MC_(in_ignored_range_below_sp
) ( Addr sp
, Addr a
, UInt szB
);
593 /*------------------------------------------------------------*/
594 /*--- Client blocks ---*/
595 /*------------------------------------------------------------*/
597 /* Describes a client block. See mc_main.c. An unused block has
598 start == size == 0. */
608 /* Get access to the client block array. */
609 void MC_(get_ClientBlock_array
)( /*OUT*/CGenBlock
** blocks
,
610 /*OUT*/UWord
* nBlocks
);
613 /*------------------------------------------------------------*/
614 /*--- Command line options + defaults ---*/
615 /*------------------------------------------------------------*/
617 /* Allow loads from partially-valid addresses? default: YES */
618 extern Bool
MC_(clo_partial_loads_ok
);
620 /* Max volume of the freed blocks queue. */
621 extern Long
MC_(clo_freelist_vol
);
623 /* Blocks with a size >= MC_(clo_freelist_big_blocks) will be put
624 in the "big block" freed blocks queue. */
625 extern Long
MC_(clo_freelist_big_blocks
);
627 /* Do leak check at exit? default: NO */
628 extern LeakCheckMode
MC_(clo_leak_check
);
630 /* How closely should we compare ExeContexts in leak records? default: 2 */
631 extern VgRes
MC_(clo_leak_resolution
);
633 /* In leak check, show loss records if their R2S(reachedness) is set.
634 Default : R2S(Possible) | R2S(Unreached). */
635 extern UInt
MC_(clo_show_leak_kinds
);
637 /* In leak check, a loss record is an error if its R2S(reachedness) is set.
638 Default : R2S(Possible) | R2S(Unreached). */
639 extern UInt
MC_(clo_errors_for_leak_kinds
);
641 /* Various leak check heuristics which can be activated/deactivated. */
647 // Consider interior pointer pointing at the array of char in a
648 // std::string as reachable.
650 // Consider interior pointer pointing at offset 64bit of a block as
651 // reachable, when the first 8 bytes contains the block size - 8.
652 // Such length+interior pointers are used by e.g. sqlite3MemMalloc.
653 // On 64bit platforms LchNewArray will also match these blocks.
655 // Consider interior pointer pointing at second word of a new[] array as
656 // reachable. Such interior pointers are used for arrays whose elements
657 // have a destructor.
658 LchMultipleInheritance
=4,
659 // Conside interior pointer pointing just after what looks a vtable
// Nr of heuristics, including the LchNone heuristic.
#define N_LEAK_CHECK_HEURISTICS 5

// Build mask to check or set Heuristic h membership
#define H2S(h) (1 << (h))
// Heuristic h is member of the Set s ?
#define HiS(h,s) ((s) & H2S(h))
672 /* Heuristics set to use for the leak search.
673 Default : all heuristics. */
674 extern UInt
MC_(clo_leak_check_heuristics
);
676 /* Assume accesses immediately below %esp are due to gcc-2.96 bugs.
678 extern Bool
MC_(clo_workaround_gcc296_bugs
);
680 /* Fill malloc-d/free-d client blocks with a specific value? -1 if
681 not, else 0x00 .. 0xFF indicating the fill value to use. Can be
682 useful for causing programs with bad heap corruption to fail in
683 more repeatable ways. Note that malloc-filled and free-filled
684 areas are still undefined and noaccess respectively. This merely
685 causes them to contain the specified values. */
686 extern Int
MC_(clo_malloc_fill
);
687 extern Int
MC_(clo_free_fill
);
689 /* Which stack trace(s) to keep for malloc'd/free'd client blocks?
690 For each client block, the stack traces where it was allocated
691 and/or freed are optionally kept depending on MC_(clo_keep_stacktraces). */
693 enum { // keep alloc stack trace ? keep free stack trace ?
694 KS_none
, // never never
695 KS_alloc
, // always never
696 KS_free
, // never always
697 KS_alloc_then_free
, // when still malloc'd when free'd
698 KS_alloc_and_free
, // always always
701 extern KeepStacktraces
MC_(clo_keep_stacktraces
);
703 /* Indicates the level of instrumentation/checking done by Memcheck.
705 1 = No undefined value checking, Addrcheck-style behaviour only:
706 only address checking is done. This is faster but finds fewer
707 errors. Note that although Addrcheck had 1 bit per byte
708 overhead vs the old Memcheck's 9 bits per byte, with this mode
709 and compressed V bits, no memory is saved with this mode --
710 it's still 2 bits per byte overhead. This is a little wasteful
711 -- it could be done with 1 bit per byte -- but lets us reuse
712 the many shadow memory access functions. Note that in this
713 mode neither the secondary V bit table nor the origin-tag cache
716 2 = Address checking and Undefined value checking are performed,
717 but origins are not tracked. So the origin-tag cache is not
718 used in this mode. This setting is the default and corresponds
719 to the "normal" Memcheck behaviour that has shipped for years.
721 3 = Address checking, undefined value checking, and origins for
722 undefined values are tracked.
726 extern Int
MC_(clo_mc_level
);
728 /* Should we show mismatched frees? Default: YES */
729 extern Bool
MC_(clo_show_mismatched_frees
);
731 /* Should we warn about deprecated realloc() of size 0 ? Default : YES */
732 extern Bool
MC_(clo_show_realloc_size_zero
);
734 /* Indicates the level of detail for Vbit tracking through integer add,
735 subtract, and some integer comparison operations. */
738 EdcNO
= 1000, // All operations instrumented cheaply
739 EdcAUTO
, // Chosen dynamically by analysing the block
740 EdcYES
// All operations instrumented expensively
742 ExpensiveDefinednessChecks
;
744 /* Level of expense in definedness checking for add/sub and compare
745 operations. Default: EdcAUTO */
746 extern ExpensiveDefinednessChecks
MC_(clo_expensive_definedness_checks
);
748 /* Do we have a range of stack offsets to ignore? Default: NO */
749 extern Bool
MC_(clo_ignore_range_below_sp
);
750 extern UInt
MC_(clo_ignore_range_below_sp__first_offset
);
751 extern UInt
MC_(clo_ignore_range_below_sp__last_offset
);
754 /*------------------------------------------------------------*/
755 /*--- Instrumentation ---*/
756 /*------------------------------------------------------------*/
758 /* Functions defined in mc_main.c */
760 /* For the fail_w_o functions, the UWord arg is actually the 32-bit
761 origin tag and should really be UInt, but to be simple and safe
762 considering it's called from generated code, just claim it to be a
764 VG_REGPARM(2) void MC_(helperc_value_checkN_fail_w_o
) ( HWord
, UWord
);
765 VG_REGPARM(1) void MC_(helperc_value_check8_fail_w_o
) ( UWord
);
766 VG_REGPARM(1) void MC_(helperc_value_check4_fail_w_o
) ( UWord
);
767 VG_REGPARM(1) void MC_(helperc_value_check1_fail_w_o
) ( UWord
);
768 VG_REGPARM(1) void MC_(helperc_value_check0_fail_w_o
) ( UWord
);
770 /* And call these ones instead to report an uninitialised value error
771 but with no origin available. */
772 VG_REGPARM(1) void MC_(helperc_value_checkN_fail_no_o
) ( HWord
);
773 VG_REGPARM(0) void MC_(helperc_value_check8_fail_no_o
) ( void );
774 VG_REGPARM(0) void MC_(helperc_value_check4_fail_no_o
) ( void );
775 VG_REGPARM(0) void MC_(helperc_value_check1_fail_no_o
) ( void );
776 VG_REGPARM(0) void MC_(helperc_value_check0_fail_no_o
) ( void );
778 /* V-bits load/store helpers */
779 VG_REGPARM(1) void MC_(helperc_STOREV64be
) ( Addr
, ULong
);
780 VG_REGPARM(1) void MC_(helperc_STOREV64le
) ( Addr
, ULong
);
781 VG_REGPARM(2) void MC_(helperc_STOREV32be
) ( Addr
, UWord
);
782 VG_REGPARM(2) void MC_(helperc_STOREV32le
) ( Addr
, UWord
);
783 VG_REGPARM(2) void MC_(helperc_STOREV16be
) ( Addr
, UWord
);
784 VG_REGPARM(2) void MC_(helperc_STOREV16le
) ( Addr
, UWord
);
785 VG_REGPARM(2) void MC_(helperc_STOREV8
) ( Addr
, UWord
);
787 VG_REGPARM(2) void MC_(helperc_LOADV256be
) ( /*OUT*/V256
*, Addr
);
788 VG_REGPARM(2) void MC_(helperc_LOADV256le
) ( /*OUT*/V256
*, Addr
);
789 VG_REGPARM(2) void MC_(helperc_LOADV128be
) ( /*OUT*/V128
*, Addr
);
790 VG_REGPARM(2) void MC_(helperc_LOADV128le
) ( /*OUT*/V128
*, Addr
);
791 VG_REGPARM(1) ULong
MC_(helperc_LOADV64be
) ( Addr
);
792 VG_REGPARM(1) ULong
MC_(helperc_LOADV64le
) ( Addr
);
793 VG_REGPARM(1) UWord
MC_(helperc_LOADV32be
) ( Addr
);
794 VG_REGPARM(1) UWord
MC_(helperc_LOADV32le
) ( Addr
);
795 VG_REGPARM(1) UWord
MC_(helperc_LOADV16be
) ( Addr
);
796 VG_REGPARM(1) UWord
MC_(helperc_LOADV16le
) ( Addr
);
797 VG_REGPARM(1) UWord
MC_(helperc_LOADV8
) ( Addr
);
800 void MC_(helperc_MAKE_STACK_UNINIT_w_o
) ( Addr base
, UWord len
, Addr nia
);
803 void MC_(helperc_MAKE_STACK_UNINIT_no_o
) ( Addr base
, UWord len
);
806 void MC_(helperc_MAKE_STACK_UNINIT_128_no_o
) ( Addr base
);
808 /* Origin tag load/store helpers */
809 VG_REGPARM(2) void MC_(helperc_b_store1
) ( Addr a
, UWord d32
);
810 VG_REGPARM(2) void MC_(helperc_b_store2
) ( Addr a
, UWord d32
);
811 VG_REGPARM(2) void MC_(helperc_b_store4
) ( Addr a
, UWord d32
);
812 VG_REGPARM(2) void MC_(helperc_b_store8
) ( Addr a
, UWord d32
);
813 VG_REGPARM(2) void MC_(helperc_b_store16
)( Addr a
, UWord d32
);
814 VG_REGPARM(2) void MC_(helperc_b_store32
)( Addr a
, UWord d32
);
815 VG_REGPARM(1) UWord
MC_(helperc_b_load1
) ( Addr a
);
816 VG_REGPARM(1) UWord
MC_(helperc_b_load2
) ( Addr a
);
817 VG_REGPARM(1) UWord
MC_(helperc_b_load4
) ( Addr a
);
818 VG_REGPARM(1) UWord
MC_(helperc_b_load8
) ( Addr a
);
819 VG_REGPARM(1) UWord
MC_(helperc_b_load16
)( Addr a
);
820 VG_REGPARM(1) UWord
MC_(helperc_b_load32
)( Addr a
);
822 /* Functions defined in mc_translate.c */
823 IRSB
* MC_(instrument
) ( VgCallbackClosure
* closure
,
825 const VexGuestLayout
* layout
,
826 const VexGuestExtents
* vge
,
827 const VexArchInfo
* archinfo_host
,
828 IRType gWordTy
, IRType hWordTy
);
830 IRSB
* MC_(final_tidy
) ( IRSB
* );
832 /* Check some assertions to do with the instrumentation machinery. */
833 void MC_(do_instrumentation_startup_checks
)( void );
835 #endif /* ndef __MC_INCLUDE_H */
837 /*--------------------------------------------------------------------*/
839 /*--------------------------------------------------------------------*/