/*--------------------------------------------------------------------*/
/*--- A header file for all parts of the MemCheck tool.           ---*/
/*---                                                 mc_include.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#ifndef __MC_INCLUDE_H
#define __MC_INCLUDE_H
#define MC_(str)    VGAPPEND(vgMemCheck_,str)


/* This is a private header file for use only within the
   memcheck/ directory. */

/*------------------------------------------------------------*/
/*--- Tracking the heap                                    ---*/
/*------------------------------------------------------------*/

/* By default, we want at least a 16B redzone on client heap blocks
   for Memcheck.
   The default can be modified by --redzone-size. */
#define MC_MALLOC_DEFAULT_REDZONE_SZB    16
// effective redzone, as (possibly) modified by --redzone-size:
extern SizeT MC_(Malloc_Redzone_SzB);
/* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. */
typedef
   enum {
      MC_AllocMalloc = 0,
      MC_AllocNew    = 1,
      MC_AllocNewVec = 2,
      MC_AllocCustom = 3
   }
   MC_AllocKind;
/* This describes a heap block.  Nb: first two fields must match core's
 * VgHashNode. */
typedef
   struct _MC_Chunk {
      struct _MC_Chunk* next;
      Addr         data;            // Address of the actual block.
      SizeT        szB : (sizeof(SizeT)*8)-2; // Size requested; 30 or 62 bits.
      MC_AllocKind allockind : 2;   // Which operation did the allocation.
      SizeT        alignB;          // Alignment (if requested) of the allocation
      ExeContext*  where[0];
      /* Variable-length array. The size depends on MC_(clo_keep_stacktraces).
         This array optionally stores the alloc and/or free stack trace. */
   }
   MC_Chunk;
/* Returns the execontext where the MC_Chunk was allocated/freed.
   Returns VG_(null_ExeContext)() if the execontext has not been recorded (due
   to MC_(clo_keep_stacktraces) and/or because block not yet freed). */
ExeContext* MC_(allocated_at) (MC_Chunk*);
ExeContext* MC_(freed_at) (MC_Chunk*);

/* Records and sets execontext according to MC_(clo_keep_stacktraces) */
void  MC_(set_allocated_at) (ThreadId, MC_Chunk*);
void  MC_(set_freed_at) (ThreadId, MC_Chunk*);

/* number of pointers needed according to MC_(clo_keep_stacktraces). */
UInt MC_(n_where_pointers) (void);
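
/* Illustrative sketch (not part of Memcheck proper; the helper name and the
   direct use of VG_(malloc) here are only assumptions for the example): how
   the variable-length 'where' array at the end of MC_Chunk interacts with
   MC_(n_where_pointers) and the set/get functions above. */
#if 0
static MC_Chunk* example_make_chunk ( ThreadId tid, Addr data, SizeT szB )
{
   /* The trailing ExeContext* array holds as many slots as
      MC_(clo_keep_stacktraces) requires; MC_(n_where_pointers) says how
      many. */
   SizeT     nBytes = sizeof(MC_Chunk)
                      + MC_(n_where_pointers)() * sizeof(ExeContext*);
   MC_Chunk* mc     = VG_(malloc)("example.mc_chunk", nBytes);
   mc->data      = data;
   mc->szB       = szB;
   mc->allockind = MC_AllocMalloc;
   MC_(set_allocated_at)(tid, mc);  /* records (or skips) the alloc stack */
   /* MC_(allocated_at)(mc) now yields the recorded context, or
      VG_(null_ExeContext)() if none was kept. */
   return mc;
}
#endif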
/* Memory pool.  Nb: first two fields must match core's VgHashNode. */
typedef
   struct _MC_Mempool {
      struct _MC_Mempool* next;
      Addr          pool;           // pool identifier
      SizeT         rzB;            // pool red-zone size
      Bool          is_zeroed;      // allocations from this pool are zeroed
      Bool          auto_free;      // De-alloc block frees all chunks in block
      Bool          metapool;       // These chunks are VALGRIND_MALLOC_LIKE
                                    // memory, and used as pool.
      VgHashTable  *chunks;         // chunks associated with this pool
   }
   MC_Mempool;
void* MC_(new_block)  ( ThreadId tid,
                        Addr p, SizeT size, SizeT align,
                        SizeT orig_align,
                        Bool is_zeroed, MC_AllocKind kind,
                        VgHashTable *table);
void MC_(handle_free) ( ThreadId tid,
                        Addr p, UInt rzB, MC_AllocKind kind );

void MC_(create_mempool)  ( Addr pool, UInt rzB, Bool is_zeroed,
                            Bool auto_free, Bool metapool );
void MC_(destroy_mempool) ( Addr pool );
void MC_(mempool_alloc)   ( ThreadId tid, Addr pool,
                            Addr addr, SizeT size );
void MC_(mempool_free)    ( Addr pool, Addr addr );
void MC_(mempool_trim)    ( Addr pool, Addr addr, SizeT size );
void MC_(move_mempool)    ( Addr poolA, Addr poolB );
void MC_(mempool_change)  ( Addr pool, Addr addrA, Addr addrB, SizeT size );
Bool MC_(mempool_exists)  ( Addr pool );
Bool MC_(is_mempool_block)( MC_Chunk* mc_search );
/* Searches for a recently freed block which might bracket Addr a.
   Returns the MC_Chunk* for this block, or NULL if no bracketing block
   is found. */
MC_Chunk* MC_(get_freed_block_bracketting)( Addr a );
/* For efficient pooled alloc/free of the MC_Chunk. */
extern PoolAlloc* MC_(chunk_poolalloc);

/* For tracking malloc'd blocks.  Nb: it's quite important that it's a
   VgHashTable, because VgHashTable allows duplicate keys without complaint.
   This can occur if a user marks a malloc() block as also a custom block with
   MALLOCLIKE_BLOCK. */
extern VgHashTable *MC_(malloc_list);

/* For tracking memory pools. */
extern VgHashTable *MC_(mempool_list);

/* Shadow memory functions */
Bool MC_(check_mem_is_noaccess)( Addr a, SizeT len, Addr* bad_addr );
void MC_(make_mem_noaccess)        ( Addr a, SizeT len );
void MC_(make_mem_undefined_w_otag)( Addr a, SizeT len, UInt otag );
void MC_(make_mem_defined)         ( Addr a, SizeT len );
void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len );

void MC_(xtmemory_report) ( const HChar* filename, Bool fini );

void MC_(print_malloc_stats) ( void );
/* nr of free operations done */
SizeT MC_(get_cmalloc_n_frees) ( void );

void* MC_(malloc)               ( ThreadId tid, SizeT n );
void* MC_(__builtin_new)        ( ThreadId tid, SizeT n );
void* MC_(__builtin_new_aligned)( ThreadId tid, SizeT n, SizeT alignB, SizeT orig_alignB );
void* MC_(__builtin_vec_new)    ( ThreadId tid, SizeT n );
void* MC_(__builtin_vec_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB, SizeT orig_alignB );
void* MC_(memalign)             ( ThreadId tid, SizeT align, SizeT orig_alignB, SizeT n );
void* MC_(calloc)               ( ThreadId tid, SizeT nmemb, SizeT size1 );
void  MC_(free)                 ( ThreadId tid, void* p );
void  MC_(__builtin_delete)     ( ThreadId tid, void* p );
void  MC_(__builtin_delete_aligned)     ( ThreadId tid, void* p, SizeT alignB );
void  MC_(__builtin_vec_delete) ( ThreadId tid, void* p );
void  MC_(__builtin_vec_delete_aligned) ( ThreadId tid, void* p, SizeT alignB );
void* MC_(realloc)              ( ThreadId tid, void* p, SizeT new_size );
SizeT MC_(malloc_usable_size)   ( ThreadId tid, void* p );

void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB);
/*------------------------------------------------------------*/
/*--- Origin tracking translate-time support               ---*/
/*------------------------------------------------------------*/

/* See detailed comments in mc_machine.c. */
Int MC_(get_otrack_shadow_offset) ( Int offset, Int szB );
IRType MC_(get_otrack_reg_array_equiv_int_type) ( IRRegArray* arr );
/* Constants which are used as the lowest 2 bits in origin tags.

   An origin tag comprises an upper 30-bit ECU field and a lower 2-bit
   'kind' field.  The ECU field is a number given out by m_execontext
   and has a 1-1 mapping with ExeContext*s.  An ECU can be used
   directly as an origin tag (otag), but in fact we want the extra
   'kind' field to indicate roughly where the tag came from.  This
   helps print more understandable error messages for the user -- it
   has no other purpose.

   Hence the following 2-bit constants are needed for the 'kind' field.

   To summarise:

   * Both ECUs and origin tags are represented as 32-bit words

   * m_execontext and the core-tool interface deal purely in ECUs.
     They have no knowledge of origin tags - that is a purely
     Memcheck-internal matter.

   * all valid ECUs have the lowest 2 bits zero and at least
     one of the upper 30 bits nonzero (see VG_(is_plausible_ECU))

   * to convert from an ECU to an otag, OR in one of the MC_OKIND_
     constants below

   * to convert an otag back to an ECU, AND it with ~3
*/

#define MC_OKIND_UNKNOWN  0  /* unknown origin */
#define MC_OKIND_HEAP     1  /* this is a heap origin */
#define MC_OKIND_STACK    2  /* this is a stack origin */
#define MC_OKIND_USER     3  /* arises from user-supplied client req */
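
/* Illustrative sketch of the ECU <-> otag conversions described above (the
   helper names are hypothetical, chosen only for this example). */
#if 0
static inline UInt example_ecu_to_otag ( UInt ecu )
{
   /* A valid ECU has its lowest 2 bits clear, so OR-ing in a kind
      constant is lossless. */
   return ecu | MC_OKIND_HEAP;
}
static inline UInt example_otag_to_ecu ( UInt otag )
{
   /* Mask off the 2-bit 'kind' field to recover the ECU. */
   return otag & ~3u;
}
#endif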
/*------------------------------------------------------------*/
/*--- Profiling of memory events                            ---*/
/*------------------------------------------------------------*/

/* Define to collect detailed performance info. */
/* #define MC_PROFILE_MEMORY */
#ifdef MC_PROFILE_MEMORY

/* Order of enumerators does not matter. But MCPE_LAST has to be the
   last entry in the list as it is used as an array bound. */
enum {
   MCPE_LOADV8,
   MCPE_LOADV8_SLOW1,
   MCPE_LOADV8_SLOW2,
   MCPE_LOADV16,
   MCPE_LOADV16_SLOW1,
   MCPE_LOADV16_SLOW2,
   MCPE_LOADV32,
   MCPE_LOADV32_SLOW1,
   MCPE_LOADV32_SLOW2,
   MCPE_LOADV64,
   MCPE_LOADV64_SLOW1,
   MCPE_LOADV64_SLOW2,
   MCPE_LOADV_128_OR_256,
   MCPE_LOADV_128_OR_256_SLOW_LOOP,
   MCPE_LOADV_128_OR_256_SLOW1,
   MCPE_LOADV_128_OR_256_SLOW2,
   MCPE_LOADVN_SLOW,
   MCPE_LOADVN_SLOW_LOOP,
   MCPE_STOREV8,
   MCPE_STOREV8_SLOW1,
   MCPE_STOREV8_SLOW2,
   MCPE_STOREV8_SLOW3,
   MCPE_STOREV8_SLOW4,
   MCPE_STOREV16,
   MCPE_STOREV16_SLOW1,
   MCPE_STOREV16_SLOW2,
   MCPE_STOREV16_SLOW3,
   MCPE_STOREV16_SLOW4,
   MCPE_STOREV32,
   MCPE_STOREV32_SLOW1,
   MCPE_STOREV32_SLOW2,
   MCPE_STOREV32_SLOW3,
   MCPE_STOREV32_SLOW4,
   MCPE_STOREV64,
   MCPE_STOREV64_SLOW1,
   MCPE_STOREV64_SLOW2,
   MCPE_STOREV64_SLOW3,
   MCPE_STOREV64_SLOW4,
   MCPE_STOREVN_SLOW,
   MCPE_STOREVN_SLOW_LOOP,
   MCPE_MAKE_ALIGNED_WORD32_UNDEFINED,
   MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW,
   MCPE_MAKE_ALIGNED_WORD64_UNDEFINED,
   MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW,
   MCPE_MAKE_ALIGNED_WORD32_NOACCESS,
   MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW,
   MCPE_MAKE_ALIGNED_WORD64_NOACCESS,
   MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW,
   MCPE_MAKE_MEM_NOACCESS,
   MCPE_MAKE_MEM_UNDEFINED,
   MCPE_MAKE_MEM_UNDEFINED_W_OTAG,
   MCPE_MAKE_MEM_DEFINED,
   MCPE_CHEAP_SANITY_CHECK,
   MCPE_EXPENSIVE_SANITY_CHECK,
   MCPE_COPY_ADDRESS_RANGE_STATE,
   MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1,
   MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2,
   MCPE_CHECK_MEM_IS_NOACCESS,
   MCPE_CHECK_MEM_IS_NOACCESS_LOOP,
   MCPE_IS_MEM_ADDRESSABLE,
   MCPE_IS_MEM_ADDRESSABLE_LOOP,
   MCPE_IS_MEM_DEFINED,
   MCPE_IS_MEM_DEFINED_LOOP,
   MCPE_IS_MEM_DEFINED_COMPREHENSIVE,
   MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP,
   MCPE_IS_DEFINED_ASCIIZ,
   MCPE_IS_DEFINED_ASCIIZ_LOOP,
   MCPE_FIND_CHUNK_FOR_OLD,
   MCPE_FIND_CHUNK_FOR_OLD_LOOP,
   MCPE_SET_ADDRESS_RANGE_PERMS,
   MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP,
   MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP,
   MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM,
   MCPE_NEW_MEM_STACK,
   MCPE_NEW_MEM_STACK_4,
   MCPE_NEW_MEM_STACK_8,
   MCPE_NEW_MEM_STACK_12,
   MCPE_NEW_MEM_STACK_16,
   MCPE_NEW_MEM_STACK_32,
   MCPE_NEW_MEM_STACK_112,
   MCPE_NEW_MEM_STACK_128,
   MCPE_NEW_MEM_STACK_144,
   MCPE_NEW_MEM_STACK_160,
   MCPE_DIE_MEM_STACK,
   MCPE_DIE_MEM_STACK_4,
   MCPE_DIE_MEM_STACK_8,
   MCPE_DIE_MEM_STACK_12,
   MCPE_DIE_MEM_STACK_16,
   MCPE_DIE_MEM_STACK_32,
   MCPE_DIE_MEM_STACK_112,
   MCPE_DIE_MEM_STACK_128,
   MCPE_DIE_MEM_STACK_144,
   MCPE_DIE_MEM_STACK_160,
   MCPE_MAKE_STACK_UNINIT_W_O,
   MCPE_MAKE_STACK_UNINIT_NO_O,
   MCPE_MAKE_STACK_UNINIT_128_NO_O,
   MCPE_MAKE_STACK_UNINIT_128_NO_O_ALIGNED_16,
   MCPE_MAKE_STACK_UNINIT_128_NO_O_ALIGNED_8,
   MCPE_MAKE_STACK_UNINIT_128_NO_O_SLOWCASE,
   /* Do not add enumerators past this line. */
   MCPE_LAST
};

extern ULong MC_(event_ctr)[MCPE_LAST];
#  define PROF_EVENT(ev)                           \
   do { tl_assert((ev) >= 0 && (ev) < MCPE_LAST);  \
        MC_(event_ctr)[ev]++;                      \
   } while (False);

#else

#  define PROF_EVENT(ev)    /* */

#endif   /* MC_PROFILE_MEMORY */
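
/* Illustrative use of PROF_EVENT (hypothetical call site, not real Memcheck
   code): a shadow-memory helper bumps its event counter on entry.  When
   MC_PROFILE_MEMORY is not defined the macro expands to nothing, so such
   calls cost nothing in normal builds. */
#if 0
static void example_count_loadv8 ( void )
{
   PROF_EVENT(MCPE_LOADV8);   /* increments MC_(event_ctr)[MCPE_LOADV8] */
}
#endif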
/*------------------------------------------------------------*/
/*--- V and A bits (Victoria & Albert ?)                    ---*/
/*------------------------------------------------------------*/

/* The number of entries in the primary map can be altered.  However
   we hardwire the assumption that each secondary map covers precisely
   64k of address space. */
#define SM_SIZE 65536            /* DO NOT CHANGE */
#define SM_MASK (SM_SIZE-1)      /* DO NOT CHANGE */

#define V_BIT_DEFINED         0
#define V_BIT_UNDEFINED       1

#define V_BITS8_DEFINED       0
#define V_BITS8_UNDEFINED     0xFF

#define V_BITS16_DEFINED      0
#define V_BITS16_UNDEFINED    0xFFFF

#define V_BITS32_DEFINED      0
#define V_BITS32_UNDEFINED    0xFFFFFFFF

#define V_BITS64_DEFINED      0ULL
#define V_BITS64_UNDEFINED    0xFFFFFFFFFFFFFFFFULL
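
/* Illustrative sketch (hypothetical helper): V bits use 0 for "defined" and
   1 for "undefined", so a value is fully defined only when every V bit is 0;
   a partially defined 32-bit value would carry 0xFF in its undefined byte
   lanes and 0x00 in the defined ones. */
#if 0
static inline Bool example_word32_fully_defined ( UInt vbits32 )
{
   return vbits32 == V_BITS32_DEFINED;   /* i.e. all 32 V bits are 0 */
}
#endif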
/* Set to 1 to enable handwritten assembly helpers on targets for
   which it is supported. */
#define ENABLE_ASSEMBLY_HELPERS  1

/* Comment the below to disable the fast case LOADV */
#define PERF_FAST_LOADV 1
/*------------------------------------------------------------*/
/*--- Leak checking                                         ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      // Nb: the order is important -- it dictates the order of loss records
      // of equal sizes.
      Reachable    =0,  // Definitely reachable from root-set.
      Possible     =1,  // Possibly reachable from root-set; involves at
                        // least one interior-pointer along the way.
      IndirectLeak =2,  // Leaked, but reachable from another leaked block
                        // (be it Unreached or IndirectLeak).
      Unreached    =3   // Not reached, i.e. leaked.
                        // (At best, only reachable from itself via a cycle.)
   }
   Reachedness;
// Build mask to check or set Reachedness r membership
#define R2S(r) (1 << (r))
// Reachedness r is member of the Set s ?
#define RiS(r,s) ((s) & R2S(r))
// Returns a set containing all Reachedness
UInt MC_(all_Reachedness)(void);
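
/* Illustrative sketch (hypothetical helper): building and testing a
   Reachedness set with R2S/RiS.  The particular set shown is the documented
   default for the leak-kind options below. */
#if 0
static Bool example_shows_definite_leaks ( void )
{
   UInt set = R2S(Possible) | R2S(Unreached);
   /* MC_(all_Reachedness)() would instead give a set with every kind set. */
   return RiS(Unreached, set);   /* True: definite leaks are in the set */
}
#endif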
/* For VALGRIND_COUNT_LEAKS client request */
extern SizeT MC_(bytes_leaked);
extern SizeT MC_(bytes_indirect);
extern SizeT MC_(bytes_dubious);
extern SizeT MC_(bytes_reachable);
extern SizeT MC_(bytes_suppressed);

/* For VALGRIND_COUNT_LEAK_BLOCKS client request */
extern SizeT MC_(blocks_leaked);
extern SizeT MC_(blocks_indirect);
extern SizeT MC_(blocks_dubious);
extern SizeT MC_(blocks_reachable);
extern SizeT MC_(blocks_suppressed);
typedef
   enum {
      LC_Off,
      LC_Summary,
      LC_Full,
   }
   LeakCheckMode;

typedef
   enum {
      LCD_Any,       // Output all loss records, whatever the delta.
      LCD_Increased, // Output loss records with an increase in size or blocks.
      LCD_Changed,   // Output loss records with an increase or
                     // decrease in size or blocks.
      LCD_New        // Output new loss records.
   }
   LeakCheckDeltaMode;
/* When a LossRecord is put into an OSet, these elements represent the key. */
typedef
   struct _LossRecordKey {
      Reachedness  state;        // LC_Extra.state value shared by all blocks.
      ExeContext*  allocated_at; // Where they were allocated.
   }
   LossRecordKey;

/* A loss record, used for generating err msgs.  Multiple leaked blocks can be
 * merged into a single loss record if they have the same state and similar
 * enough allocation points (controlled by --leak-resolution). */
typedef
   struct _LossRecord {
      LossRecordKey key;               // Key, when used in an OSet.
      SizeT         szB;               // Sum of all MC_Chunk.szB values.
      SizeT         indirect_szB;      // Sum of all LC_Extra.indirect_szB values.
      UInt          num_blocks;        // Number of blocks represented by the record.
      UInt          old_num_blocks;    // old_* values are the values found during
      SizeT         old_szB;           // the previous leak search; they are used to
      SizeT         old_indirect_szB;  // output only the changed/new loss records.
   }
   LossRecord;
typedef
   struct _LeakCheckParams {
      LeakCheckMode mode;
      UInt show_leak_kinds;
      UInt errors_for_leak_kinds;
      UInt heuristics;
      LeakCheckDeltaMode deltamode;
      UInt max_loss_records_output;       // limit on the nr of loss records output.
      Bool requested_by_monitor_command;  // True when requested by gdb/vgdb.
      const HChar* xt_filename;           // if != NULL, produce an xtree leak file.
   }
   LeakCheckParams;
void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams * lcp);

// Each time a leak search is done, the leak search generation
// MC_(leak_search_gen) is incremented.
extern UInt MC_(leak_search_gen);

// maintains the lcp.deltamode given in the last call to detect_memory_leaks
extern LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);

// prints the list of blocks corresponding to the given loss_record_nr slice
// (from/to) (up to maximum max_blocks)
// Returns True if loss_record_nr_from identifies a correct loss record
// from the last leak search, returns False otherwise.
// Note that loss_record_nr_to can be bigger than the nr of loss records. All
// loss records after 'from' will then be examined and maybe printed.
// If heuristics != 0, print only the loss records/blocks found via
// one of the heuristics in the set.
Bool MC_(print_block_list) ( UInt loss_record_nr_from, UInt loss_record_nr_to,
                             UInt max_blocks, UInt heuristics);

// Prints the addresses/registers/... at which a pointer to
// the given range [address, address+szB[ is found.
void MC_(who_points_at) ( Addr address, SizeT szB);

// if delta_mode == LCD_Any, prints in buf an empty string
// otherwise prints a delta in the layout " (+%'lu)" or " (-%'lu)"
extern HChar * MC_(snprintf_delta) (HChar * buf, Int size,
                                    SizeT current_val, SizeT old_val,
                                    LeakCheckDeltaMode delta_mode);
Bool MC_(is_valid_aligned_word)     ( Addr a );
Bool MC_(is_within_valid_secondary) ( Addr a );

// Prints as user msg a description of the given loss record.
void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
                        LossRecord* l);
/*------------------------------------------------------------*/
/*--- Errors and suppressions                               ---*/
/*------------------------------------------------------------*/

/* Did we show the user any errors for which an uninitialised value
   origin could have been collected (but wasn't)?  If yes, then, at
   the end of the run, print a one-line message advising that a rerun
   with --track-origins=yes might help. */
extern Bool MC_(any_value_errors);

/* Standard functions for error and suppressions as required by the
   core/tool iface */
Bool MC_(eq_Error)           ( VgRes res, const Error* e1, const Error* e2 );
void MC_(before_pp_Error)    ( const Error* err );
void MC_(pp_Error)           ( const Error* err );
UInt MC_(update_Error_extra) ( const Error* err );

Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su );

Bool MC_(read_extra_suppression_info) ( Int fd, HChar** buf,
                                        SizeT* nBuf, Int* lineno, Supp *su );

Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su );

SizeT MC_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf );
SizeT MC_(print_extra_suppression_use) ( const Supp* su,
                                         /*OUT*/HChar* buf, Int nBuf );
void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su );

const HChar* MC_(get_error_name) ( const Error* err );
/* Recording of errors */
void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite );
void MC_(record_cond_error)    ( ThreadId tid, UInt otag );
void MC_(record_value_error)   ( ThreadId tid, Int szB, UInt otag );
void MC_(record_jump_error)    ( ThreadId tid, Addr a );

void MC_(record_free_error)            ( ThreadId tid, Addr a );
void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a );
void MC_(record_freemismatch_error)    ( ThreadId tid, MC_Chunk* mc );
void MC_(record_realloc_size_zero)     ( ThreadId tid, Addr a );
void MC_(record_bad_alignment)         ( ThreadId tid, SizeT align, SizeT size, const HChar *msg );
void MC_(record_bad_size)              ( ThreadId tid, SizeT align, const HChar *function );

void MC_(record_overlap_error)  ( ThreadId tid, const HChar* function,
                                  Addr src, Addr dst, SizeT szB );
void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg );
void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag );
void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, const HChar* msg, UInt otag );
void MC_(record_user_error)     ( ThreadId tid, Addr a,
                                  Bool isAddrErr, UInt otag );

Bool MC_(record_leak_error) ( ThreadId tid,
                              UInt n_this_record,
                              UInt n_total_records,
                              LossRecord* lossRecord,
                              Bool print_record,
                              Bool count_error );

Bool MC_(record_fishy_value_error) ( ThreadId tid, const HChar* function,
                                     const HChar *argument_name, SizeT value );
void MC_(record_size_mismatch_error) ( ThreadId tid, MC_Chunk* mc, SizeT size, const HChar *function_names );
void MC_(record_align_mismatch_error) ( ThreadId tid, MC_Chunk* mc, SizeT align, Bool default_delete, const HChar *function_names );
/* Leak kinds tokens to call VG_(parse_enum_set). */
extern const HChar* MC_(parse_leak_kinds_tokens);

/* prints a description of address a in the specified debuginfo epoch */
void MC_(pp_describe_addr) ( DiEpoch ep, Addr a );

/* Is this address in a user-specified "ignored range" ? */
Bool MC_(in_ignored_range) ( Addr a );

/* Is this address in a user-specified "ignored range of offsets below
   the current thread's stack pointer?" */
Bool MC_(in_ignored_range_below_sp) ( Addr sp, Addr a, UInt szB );
/*------------------------------------------------------------*/
/*--- Client blocks                                         ---*/
/*------------------------------------------------------------*/

/* Describes a client block.  See mc_main.c.  An unused block has
   start == size == 0. */
typedef
   struct {
      Addr        start;
      SizeT       size;
      ExeContext* where;
      HChar*      desc;
   }
   CGenBlock;

/* Get access to the client block array. */
void MC_(get_ClientBlock_array)( /*OUT*/CGenBlock** blocks,
                                 /*OUT*/UWord* nBlocks );
/*------------------------------------------------------------*/
/*--- Command line options + defaults                       ---*/
/*------------------------------------------------------------*/

/* Allow loads from partially-valid addresses?  default: YES */
extern Bool MC_(clo_partial_loads_ok);

/* Max volume of the freed blocks queue. */
extern Long MC_(clo_freelist_vol);

/* Blocks with a size >= MC_(clo_freelist_big_blocks) will be put
   in the "big block" freed blocks queue. */
extern Long MC_(clo_freelist_big_blocks);

/* Do leak check at exit?  default: NO */
extern LeakCheckMode MC_(clo_leak_check);

/* How closely should we compare ExeContexts in leak records? default: 2 */
extern VgRes MC_(clo_leak_resolution);

/* In leak check, show loss records if their R2S(reachedness) is set.
   Default : R2S(Possible) | R2S(Unreached). */
extern UInt MC_(clo_show_leak_kinds);

/* In leak check, a loss record is an error if its R2S(reachedness) is set.
   Default : R2S(Possible) | R2S(Unreached). */
extern UInt MC_(clo_errors_for_leak_kinds);
/* Various leak check heuristics which can be activated/deactivated. */
typedef
   enum {
      LchNone                =0,
      // no heuristic.
      LchStdString           =1,
      // Consider interior pointer pointing at the array of char in a
      // std::string as reachable.
      LchLength64            =2,
      // Consider interior pointer pointing at offset 64bit of a block as
      // reachable, when the first 8 bytes contain the block size - 8.
      // Such length+interior pointers are used by e.g. sqlite3MemMalloc.
      // On 64bit platforms, LchNewArray will also match these blocks.
      LchNewArray            =3,
      // Consider interior pointer pointing at second word of a new[] array as
      // reachable.  Such interior pointers are used for arrays whose elements
      // have a destructor.
      LchMultipleInheritance =4,
      // Consider interior pointer pointing just after what looks like a vtable
      // as reachable.
   }
   LeakCheckHeuristic;

// Nr of heuristics, including the LchNone heuristic.
#define N_LEAK_CHECK_HEURISTICS 5
// Build mask to check or set Heuristic h membership
#define H2S(h) (1 << (h))
// Heuristic h is member of the Set s ?
#define HiS(h,s) ((s) & H2S(h))
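
/* Illustrative sketch (hypothetical helper), parallel to R2S/RiS above:
   testing whether a heuristic is a member of a heuristic set. */
#if 0
static Bool example_newarray_heuristic_active ( UInt heuristic_set )
{
   /* e.g. heuristic_set might be H2S(LchStdString) | H2S(LchNewArray) */
   return HiS(LchNewArray, heuristic_set);
}
#endif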
/* Heuristics set to use for the leak search.
   Default : all heuristics. */
extern UInt MC_(clo_leak_check_heuristics);

/* Assume accesses immediately below %esp are due to gcc-2.96 bugs.
 * default: NO */
extern Bool MC_(clo_workaround_gcc296_bugs);

/* Fill malloc-d/free-d client blocks with a specific value?  -1 if
   not, else 0x00 .. 0xFF indicating the fill value to use.  Can be
   useful for causing programs with bad heap corruption to fail in
   more repeatable ways.  Note that malloc-filled and free-filled
   areas are still undefined and noaccess respectively.  This merely
   causes them to contain the specified values. */
extern Int MC_(clo_malloc_fill);
extern Int MC_(clo_free_fill);

/* Which stack trace(s) to keep for malloc'd/free'd client blocks?
   For each client block, the stack traces where it was allocated
   and/or freed are optionally kept depending on MC_(clo_keep_stacktraces). */
typedef
   enum {                 // keep alloc stack trace ?  keep free stack trace ?
      KS_none,            // never                     never
      KS_alloc,           // always                    never
      KS_free,            // never                     always
      KS_alloc_then_free, // when still malloc'd       when free'd
      KS_alloc_and_free,  // always                    always
   }
   KeepStacktraces;
extern KeepStacktraces MC_(clo_keep_stacktraces);
/* Indicates the level of instrumentation/checking done by Memcheck.

   1 = No undefined value checking, Addrcheck-style behaviour only:
       only address checking is done.  This is faster but finds fewer
       errors.  Note that although Addrcheck had 1 bit per byte
       overhead vs the old Memcheck's 9 bits per byte, with this mode
       and compressed V bits, no memory is saved with this mode --
       it's still 2 bits per byte overhead.  This is a little wasteful
       -- it could be done with 1 bit per byte -- but lets us reuse
       the many shadow memory access functions.  Note that in this
       mode neither the secondary V bit table nor the origin-tag cache
       are used.

   2 = Address checking and Undefined value checking are performed,
       but origins are not tracked.  So the origin-tag cache is not
       used in this mode.  This setting is the default and corresponds
       to the "normal" Memcheck behaviour that has shipped for years.

   3 = Address checking, undefined value checking, and origins for
       undefined values are tracked.

   The default is 2.
*/
extern Int MC_(clo_mc_level);

/* Should we show mismatched frees?  Default: YES */
extern Bool MC_(clo_show_mismatched_frees);

/* Should we warn about deprecated realloc() of size 0 ?  Default: YES */
extern Bool MC_(clo_show_realloc_size_zero);
/* Indicates the level of detail for Vbit tracking through integer add,
   subtract, and some integer comparison operations. */
typedef
   enum {
      EdcNO = 1000,  // All operations instrumented cheaply
      EdcAUTO,       // Chosen dynamically by analysing the block
      EdcYES         // All operations instrumented expensively
   }
   ExpensiveDefinednessChecks;

/* Level of expense in definedness checking for add/sub and compare
   operations.  Default: EdcAUTO */
extern ExpensiveDefinednessChecks MC_(clo_expensive_definedness_checks);

/* Do we have a range of stack offsets to ignore?  Default: NO */
extern Bool MC_(clo_ignore_range_below_sp);
extern UInt MC_(clo_ignore_range_below_sp__first_offset);
extern UInt MC_(clo_ignore_range_below_sp__last_offset);
/*------------------------------------------------------------*/
/*--- Instrumentation                                       ---*/
/*------------------------------------------------------------*/

/* Functions defined in mc_main.c */

/* For the fail_w_o functions, the UWord arg is actually the 32-bit
   origin tag and should really be UInt, but to be simple and safe
   considering it's called from generated code, just claim it to be a
   UWord. */
VG_REGPARM(2) void MC_(helperc_value_checkN_fail_w_o) ( HWord, UWord );
VG_REGPARM(1) void MC_(helperc_value_check8_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check4_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check1_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check0_fail_w_o) ( UWord );

/* And call these ones instead to report an uninitialised value error
   but with no origin available. */
VG_REGPARM(1) void MC_(helperc_value_checkN_fail_no_o) ( HWord );
VG_REGPARM(0) void MC_(helperc_value_check8_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check4_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check1_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check0_fail_no_o) ( void );
/* V-bits load/store helpers */
VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr, ULong );
VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr, ULong );
VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV8)    ( Addr, UWord );

VG_REGPARM(2) void  MC_(helperc_LOADV256be) ( /*OUT*/V256*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV256le) ( /*OUT*/V256*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV128be) ( /*OUT*/V128*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV128le) ( /*OUT*/V128*, Addr );
VG_REGPARM(1) ULong MC_(helperc_LOADV64be)  ( Addr );
VG_REGPARM(1) ULong MC_(helperc_LOADV64le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV32be)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV32le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV16be)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV16le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV8)     ( Addr );

VG_REGPARM(3)
void MC_(helperc_MAKE_STACK_UNINIT_w_o) ( Addr base, UWord len, Addr nia );

VG_REGPARM(2)
void MC_(helperc_MAKE_STACK_UNINIT_no_o) ( Addr base, UWord len );

VG_REGPARM(1)
void MC_(helperc_MAKE_STACK_UNINIT_128_no_o) ( Addr base );
/* Origin tag load/store helpers */
VG_REGPARM(2) void  MC_(helperc_b_store1) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store2) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store4) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store8) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store16)( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store32)( Addr a, UWord d32 );
VG_REGPARM(1) UWord MC_(helperc_b_load1) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load2) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load4) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load8) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load16)( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load32)( Addr a );
/* Functions defined in mc_translate.c */
IRSB* MC_(instrument) ( VgCallbackClosure* closure,
                        IRSB* bb_in,
                        const VexGuestLayout* layout,
                        const VexGuestExtents* vge,
                        const VexArchInfo* archinfo_host,
                        IRType gWordTy, IRType hWordTy );

IRSB* MC_(final_tidy) ( IRSB* );

/* Check some assertions to do with the instrumentation machinery. */
void MC_(do_instrumentation_startup_checks)( void );

#endif /* ndef __MC_INCLUDE_H */

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/