/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                        mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "mc_include.h"
/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                   ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
      the "big blocks" freed list
      the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks)
   are linked in the big blocks freed list.
   This allows a client to allocate and free big blocks
   (e.g. bigger than MC_(clo_freelist_vol)) without immediately losing
   all protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
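
/* For reference: the freed-queue volume and the big-block threshold are
   set by the memcheck options --freelist-vol= and --freelist-big-blocks=,
   for example (the values below are arbitrary, for illustration only):

      valgrind --tool=memcheck --freelist-vol=50000000 \
               --freelist-big-blocks=5000000 ./prog

   Larger values keep freed blocks quarantined for longer, which improves
   the chance of catching dangling-pointer accesses, at the cost of extra
   memory held by the queue. */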
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block would be
      directly released anyway: in that case, we put it at the head of
      the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l] = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}
/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big blocks list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}
MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}
/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fallthrough to 1
      case 1: mc->where[0] = 0; // fallthrough to 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}
static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}
// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock and uses
         VALGRIND_MALLOCLIKE_BLOCK to "mark" an address at the beginning
         of this superblock, then this address will appear twice in the
         block_list.  We handle this case by checking size and allockind.
         Note: having the same block twice in MC_(malloc_list) is probably
         a recipe for bugs.  It might be better to create a "standard"
         mempool to handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}
// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) for an MC_AllocCustom block
      not found in any mempool, as such a block can be inserted in
      MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}
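
/* For reference: custom-allocator blocks (MC_AllocCustom) enter
   MC_(malloc_list) or a mempool's chunk table through client requests.
   A client-side sketch, assuming valgrind.h is included and my_alloc /
   my_free stand for the client's own allocator:

      p = my_alloc(sz);
      VALGRIND_MALLOCLIKE_BLOCK(p, sz, 0/*rzB*/, 0/*is_zeroed*/);
      ...
      VALGRIND_FREELIKE_BLOCK(p, 0/*rzB*/);
      my_free(p);
*/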
ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}
void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
}
void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   Int pos;
   ExeContext* ec_free;

   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:
         if (LIKELY(VG_(clo_xtree_memory) != Vg_XTMemory_Full))
            return;
         pos = -1; break;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   /* We need the execontext for the free operation, either to store
      it in the mc chunk and/or for full xtree memory profiling.
      Note: we are guaranteed to find the ec_alloc in mc->where[0], as
      mc_post_clo_init verifies the consistency of --xtree-memory and
      --keep-stacktraces. */
   ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
   if (LIKELY(pos >= 0))
      mc->where[pos] = ec_free;
}
UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}
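
/* For reference: MC_(clo_keep_stacktraces) is driven by the option
   --keep-stacktraces=none|alloc|free|alloc-then-free|alloc-and-free.
   The where[] array is sized accordingly: one slot when only the alloc
   or the free context (or one of them at a time) is kept, two slots for
   alloc-and-free, and none for 'none'. */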
/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                  ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
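
/* For reference: the wrappers below (MC_(malloc), MC_(calloc), MC_(free),
   the C++ new/delete variants, MC_(realloc), MC_(malloc_usable_size), ...)
   are installed as memcheck's malloc replacement in mc_main.c, via
   VG_(needs_malloc_replacement); the core then routes the client's heap
   calls to them. */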
void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}
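
/* For reference: MC_(record_fishy_value_error) flags size arguments that
   are negative when interpreted as a signed word.  A client call such as

      malloc((size_t)-3);

   is reported as a "fishy (possibly negative) value" argument error, and
   the wrapper above then returns NULL without allocating. */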
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}
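
/* For reference: the fill patterns used above come from the options
   --malloc-fill=<hexbyte> and --free-fill=<hexbyte>, e.g.

      valgrind --tool=memcheck --malloc-fill=0xAB --free-fill=0xCD ./prog

   which makes use of uninitialised or freed heap memory easier to spot
   in the program's own output or in a debugger. */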
static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still allocated
      blocks, but we are in the middle of freeing this one.  To report
      the error correctly, we re-insert the chunk (making it again a
      "clean allocated block"), report the error, and then re-remove the
      chunk.  This avoids doing a VG_(HT_lookup) followed by a
      VG_(HT_remove) in all "non-erroneous" cases. */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}
void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}
void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}

void MC_(__builtin_vec_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}
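
/* For reference: the allockind recorded at allocation time is what lets
   MC_(handle_free) diagnose mismatches.  A client-side C++ sketch:

      int* p = new int[10];
      delete p;      // reported as "Mismatched free() / delete / delete []"

   Reporting of these can be disabled with --show-mismatched-frees=no
   (MC_(clo_show_mismatched_frees)). */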
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This guarantees that we catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue.) */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
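
/* For reference: because MC_(realloc) always moves the payload and pushes
   the old chunk onto the freed queue, a stale pointer kept across realloc
   is caught.  A client-side sketch:

      char* p = malloc(8);
      char* q = realloc(p, 64);
      p[0] = 'x';    // invalid write, for as long as the old block is
                     // still quarantined in the freed queue
*/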
SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}
/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
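
/* For reference: this path is driven by the client request
   VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB), typically
   issued by allocators that grow or shrink a block without moving it. */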
/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                    ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/

static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk *mc;
   ThreadId tid;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                         mc->data, (mc->szB + 0UL));
         }

         VG_(HT_remove_at_Iter)(MC_(malloc_list));
         die_and_free_mem(tid, mc, mp->rzB);
      }
   }
}
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
                   " autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed,
                   auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
      if (auto_free && !metapool)
         VG_(tool_panic)("Inappropriate use of mempool:"
                         " an auto free pool must be a meta pool. Aborting\n");
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->auto_free  = auto_free;
   mp->metapool   = metapool;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
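
/* For reference: pools are declared from the client side with
   VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed); recent valgrind.h
   versions also provide VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed,
   flags), whose VALGRIND_MEMPOOL_METAPOOL and VALGRIND_MEMPOOL_AUTO_FREE
   flags map onto the metapool/auto_free arguments handled above.
   'pool' is simply whatever address the client chooses as the pool
   anchor. */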
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}
static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %u pools, %u chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%u chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u: %lu bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We nevertheless mark the redzones noaccess to catch some
         // bugs if the user forgot to do so.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}
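
/* For reference: a client-side sketch of the requests handled above and
   below, assuming a pool anchor 'pool' created earlier and where
   pool_carve_out_bytes is a placeholder for the client's own carving
   logic:

      obj = pool_carve_out_bytes(pool, sz);
      VALGRIND_MEMPOOL_ALLOC(pool, obj, sz);    // -> MC_(mempool_alloc)
      ...
      VALGRIND_MEMPOOL_FREE(pool, obj);         // -> MC_(mempool_free)
*/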
void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
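
/* For reference: the trim above corresponds to the client request
   VALGRIND_MEMPOOL_TRIM(pool, addr, size): chunks wholly inside
   [addr, addr+size) are kept, chunks wholly outside are discarded, and
   chunks straddling a boundary are shrunk to the intersecting part. */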
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool*  mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}
static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
   if (mc) {
      xta->nbytes = mc->szB;
      xta->nblocks = 1;
      *ec_alloc = MC_(allocated_at)(mc);
   } else
      xta->nblocks = 0;
}

void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(MC_(malloc_list));
   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}
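
/* For reference: the xtree reporting above is enabled with
   --xtree-memory=full (the output file can be chosen with
   --xtree-memory-file=<file>); it produces a per-call-stack heap profile
   viewable with callgrind/kcachegrind style tools. */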
/*------------------------------------------------------------*/
/*--- Statistics printing                                   ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/