/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                          mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16

/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                   ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;

MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind );
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   "big blocks" freed list
   "small blocks" freed list
   The blocks with a size >= MC_(clo_freelist_big_blocks)
   are linked in the big blocks freed list.
   This allows a client to allocate and free big blocks
   (e.g. bigger than MC_(clo_freelist_vol)) without immediately losing
   all protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
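
/* An illustrative client-side sketch (not part of Memcheck itself): the
   deferred free queue above is what lets Memcheck report a use-after-free
   as a hit on a "recently free'd" block rather than on re-used heap.
   A program like the following would trigger such a report while the
   block is still parked on the queue:

      #include <stdlib.h>

      int main(void)
      {
         int* p = malloc(sizeof(int));
         *p = 42;
         free(p);
         return *p;   // Memcheck: invalid read inside a free'd block
      }
*/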

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int  l    = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block
      would be directly released anyway: in this case, we
      put it at the head of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         freed_list_end[l]->next = mc;
         freed_list_end[l] = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}

/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with big block list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}

MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind )
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fallthrough to 1
      case 1: mc->where[0] = 0; // fallthrough to 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc == NULL)
      return False;

   tl_assert (found_mc->data == mc->data);
   /* If a user builds a pool from a malloc-ed superblock
      and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
      an address at the beginning of this superblock, then
      this address will appear twice in the block_list.
      We handle this case by checking size and allockind.
      Note: I suspect that having the same block
      twice in MC_(malloc_list) is a recipe for bugs.
      It might be better to create a "standard" mempool to
      handle all this more cleanly. */
   if (found_mc->szB != mc->szB
       || found_mc->allockind != mc->allockind)
      return False;

   tl_assert (found_mc == mc);
   return True;
}
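
/* An illustrative client-side sketch (not part of Memcheck itself) of the
   "superblock" situation described in the comment above: a client carves
   a pool out of one malloc'd superblock and registers a sub-block at the
   very start of that superblock, so the same address ends up registered
   twice (once as MC_AllocMalloc, once as MC_AllocCustom).

      #include <stdlib.h>
      #include <valgrind/valgrind.h>

      int main(void)
      {
         char* super = malloc(4096);           // tracked as a malloc'd block
         char* first = super;                  // same address as the superblock
         VALGRIND_MALLOCLIKE_BLOCK(first, 64,
                                   0 /- rzB -/, 0 /- is_zeroed -/);
         VALGRIND_FREELIKE_BLOCK(first, 0 /- rzB -/);
         free(super);
         return 0;
      }

   (The "/-" markers stand in for nested comment delimiters.) */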

// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) for an MC_AllocCustom block
      not found in any mempool, as such a block can be inserted in
      MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}

ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}

void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
}

void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   Int         pos;
   ExeContext* ec_free;

   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:
         if (LIKELY(VG_(clo_xtree_memory)
                    != Vg_XTMemory_Full))
            return;
         pos = -1; break;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   /* We need the execontext for the free operation, either to store
      it in the mc chunk and/or for full xtree memory profiling.
      Note: we are guaranteed to find the ec_alloc in mc->where[0], as
      mc_post_clo_init verifies the consistency of --xtree-memory and
      --keep-stacktraces. */
   ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
   if (LIKELY(pos >= 0))
      mc->where[pos] = ec_free;
}

UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}
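
/* For reference (a sketch of the option mapping, not extra machinery):
   the switch above is driven by Memcheck's --keep-stacktraces option,
   e.g.

      valgrind --tool=memcheck --keep-stacktraces=alloc-and-free ./prog

   keeps both stacks, so where[0] holds the allocation context and
   where[1] the free context, whereas alloc-then-free reuses the single
   where[0] slot for whichever of the two events happened last. */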

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                  ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table )
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}
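
/* For reference (an illustrative invocation, not extra machinery): how
   long freed blocks stay queued is governed by Memcheck's --freelist-vol
   and --freelist-big-blocks options, e.g.

      valgrind --tool=memcheck --freelist-vol=200000000 \
               --freelist-big-blocks=1000000 ./prog

   retains up to roughly 200MB of freed payload, routing blocks of 1MB
   and above through the "big blocks" list handled above. */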

void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still-allocated
      blocks, but we are in the middle of freeing this one.  To report
      the error correctly, we re-insert the chunk (making it again a
      "clean allocated block"), report the error, and then re-remove
      the chunk.  This avoids doing a VG_(HT_lookup) followed by a
      VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew );
}

void MC_(__builtin_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew );
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec );
}

void MC_(__builtin_vec_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec );
}

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   if (p_old == NULL) {
      return MC_(new_block) ( tid, 0, new_szB, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }

   if (new_szB == 0U) {
      if (MC_(clo_show_realloc_size_zero)) {
         MC_(record_realloc_size_zero)(tid, (Addr)p_old);
      }
      if (VG_(clo_realloc_zero_bytes_frees) == True) {
         MC_(handle_free)(
            tid, (Addr)p_old, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
         return NULL;
      }
      new_szB = 1U;
   }

   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue.) */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess)( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
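
/* An illustrative client-side sketch (not part of Memcheck itself) of the
   VALGRIND_RESIZEINPLACE_BLOCK request the function above services.  The
   names p, old_size, new_size and rz_size are the custom allocator's own
   (hypothetical) bookkeeping values:

      #include <valgrind/valgrind.h>

      // 'p' was previously announced with VALGRIND_MALLOCLIKE_BLOCK
      // (or comes from the real malloc wrappers above).
      VALGRIND_RESIZEINPLACE_BLOCK(p, old_size, new_size, rz_size);

   After the request, bytes beyond new_size become noaccess again and any
   newly exposed bytes are treated as undefined, as implemented above. */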

/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                    ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/

static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk* mc;
   ThreadId  tid;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                         mc->data, (mc->szB + 0UL));
         }

         VG_(HT_remove_at_Iter)(MC_(malloc_list));
         die_and_free_mem(tid, mc, mp->rzB);
      }
   }
}

void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
                   " autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed,
                   auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
      if (auto_free && !metapool)
         VG_(tool_panic)("Inappropriate use of mempool:"
                         " an auto free pool must be a meta pool. Aborting\n");
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->auto_free  = auto_free;
   mp->metapool   = metapool;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
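
/* An illustrative client-side sketch (not part of Memcheck itself) of the
   mempool client requests handled in this section.  pool_base, chunk and
   chunk_size are the custom pool allocator's own (hypothetical) values:

      #include <valgrind/valgrind.h>

      VALGRIND_CREATE_MEMPOOL(pool_base, 16 /- rzB -/, 0 /- is_zeroed -/);

      VALGRIND_MEMPOOL_ALLOC(pool_base, chunk, chunk_size);  // hand out a chunk
      VALGRIND_MEMPOOL_FREE(pool_base, chunk);               // return it

      VALGRIND_DESTROY_MEMPOOL(pool_base);                   // tear down the pool

   ("/-" stands in for nested comment delimiters.)  These requests are
   serviced by MC_(create_mempool) above and by MC_(destroy_mempool),
   MC_(mempool_alloc) and MC_(mempool_free) below. */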

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %u pools, %u chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%u chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u: %lu bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);

      // This is not needed if the user application has properly
      // marked the superblock noaccess when defining the mempool.
      // However, we still mark the redzones noaccess to catch some
      // bugs if the user forgot.
      MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
      MC_(make_mem_noaccess) ( addr + szB, mp->rzB);

      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}

void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo  = addr;
         } else {
            min = addr;
            lo  = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi  = addr + szB;
         } else {
            max = addr + szB;
            hi  = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min );
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS
   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   return (mp != NULL);
}

static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
   if (mc) {
      xta->nbytes = mc->szB;
      *ec_alloc = MC_(allocated_at)(mc);
   }
}

void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(MC_(malloc_list));

   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}
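
/* For reference (an illustrative invocation, not extra machinery): this
   report is produced when Memcheck runs with --xtree-memory, e.g.

      valgrind --tool=memcheck --xtree-memory=full \
               --xtree-memory-file=xtmemory.kcg ./prog

   The resulting file can typically be inspected with kcachegrind or
   callgrind_annotate-style tooling. */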

/*------------------------------------------------------------*/
/*--- Statistics printing                                   ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/