/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*--- mc_malloc_wrappers.c                                         ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "mc_include.h"
/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                   ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            SizeT alignB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Record blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
      the "big blocks" freed list,
      the "small blocks" freed list.
   Blocks with a size >= MC_(clo_freelist_big_blocks) are linked
   in the big blocks freed list.
   This allows a client to allocate and free big blocks
   (e.g. bigger than MC_(clo_freelist_vol)) without immediately
   losing all protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block would be
      released directly anyway: in that case, put it at the head of
      the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l]    = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l]       = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}
/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big block list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}
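/* Illustrative note (not part of the original file): the thresholds used
   by the freed-blocks queue above are controlled from the command line
   via Memcheck's --freelist-vol and --freelist-big-blocks options, e.g.

      valgrind --tool=memcheck --freelist-vol=200000000 \
               --freelist-big-blocks=10000000 ./myprog

   The numeric values shown are arbitrary examples, not defaults. */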
MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}
// @todo PJF !!!!!!!!!!
// below, alignB is the cleaned-up value;
// the original value would be preferable !!!!!!!!!

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            SizeT alignB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->alignB    = alignB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fallthrough to 1
      case 1: mc->where[0] = 0; // fallthrough to 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}
// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will appear twice in the block_list.
         We handle this case by checking size and allockind.
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         It might be better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}

// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to this for an MC_AllocCustom block not found
      above, as such a block can be inserted in MC_(malloc_list)
      by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}
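/* Illustrative sketch (not part of the original file): the MC_AllocCustom
   chunks discussed above are typically created by a client-side custom
   allocator announcing its blocks with the MALLOCLIKE/FREELIKE client
   requests from memcheck.h, assuming the standard macro signatures:

      void* p = my_pool_alloc(sz);                       // hypothetical allocator
      VALGRIND_MALLOCLIKE_BLOCK(p, sz, 0, 0);  // (addr, sizeB, rzB, is_zeroed)
      ...
      VALGRIND_FREELIKE_BLOCK(p, 0);           // (addr, rzB)
      my_pool_free(p);
*/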
ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}

void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
}

void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   Int pos;
   ExeContext* ec_free;

   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:
         if (LIKELY(VG_(clo_xtree_memory) != Vg_XTMemory_Full))
            return;
         pos = -1; break;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   /* We need the execontext for the free operation, either to store
      it in the mc chunk and/or for full xtree memory profiling.
      Note: we are guaranteed to find the ec_alloc in mc->where[0], as
      mc_post_clo_init verifies the consistency of --xtree-memory and
      --keep-stacktraces. */
   ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
   if (LIKELY(pos >= 0))
      mc->where[pos] = ec_free;
}

UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}
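/* Summary of the mapping implemented by the five functions above, as can
   be read off the switches: with --keep-stacktraces=alloc, =free or
   =alloc-then-free a single where[] slot is kept (the allocation stack,
   the free stack, or first the allocation stack and later overwritten by
   the free stack); with =alloc-and-free two slots are kept (where[0] =
   alloc, where[1] = free); with =none nothing is kept and
   VG_(null_ExeContext)() is returned instead. */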
/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                  ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       SizeT orig_alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, orig_alignB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB, SizeT orig_alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB, orig_alignB,
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB, SizeT orig_alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB, orig_alignB,
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT orig_alignB, SizeT n)
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   }

   return MC_(new_block) ( tid, 0, n, alignB, orig_alignB,
      /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment), 0U,
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}

static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still-allocated
      blocks, but we are in the middle of freeing this one.  To
      report the error correctly, we re-insert the chunk (making it
      again a "clean allocated block"), report the error, and then
      re-remove the chunk.  This avoids doing a VG_(HT_lookup)
      followed by a VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}
void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}

void MC_(__builtin_vec_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   if (p_old == NULL) {
      return MC_(new_block) ( tid, 0, new_szB, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }

   if (new_szB == 0U) {
      if (MC_(clo_show_realloc_size_zero)) {
         MC_(record_realloc_size_zero)(tid, (Addr)p_old);
      }

      if (VG_(clo_realloc_zero_bytes_frees) == True) {
         MC_(handle_free)(
            tid, (Addr)p_old, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );

         return NULL;
      }
      new_szB = 1U;
   }

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue). */

      // Allocate a new chunk.
      // Re-allocation does not conserve alignment.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, 0U, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
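/* Illustrative client-side view (not part of the original file), assuming
   the standard client request macro from valgrind.h: a block resized in
   place is announced as

      VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB);

   which is what ends up in MC_(handle_resizeInPlace) above. */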
/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                    ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/
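/* Illustrative client-side view (not part of the original file) of the
   pool client requests handled by the functions below, assuming the
   standard macros from valgrind.h:

      VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed);
      VALGRIND_MEMPOOL_ALLOC(pool, addr, szB);          // -> MC_(mempool_alloc)
      VALGRIND_MEMPOOL_FREE(pool, addr);                // -> MC_(mempool_free)
      VALGRIND_MEMPOOL_TRIM(pool, addr, szB);           // -> MC_(mempool_trim)
      VALGRIND_MOVE_MEMPOOL(poolA, poolB);              // -> MC_(move_mempool)
      VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, szB); // -> MC_(mempool_change)
      VALGRIND_DESTROY_MEMPOOL(pool);                   // -> MC_(destroy_mempool)

   The auto_free/metapool variants are driven by the extended creation
   request (VALGRIND_CREATE_MEMPOOL_EXT), where available. */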
static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk *mc;
   ThreadId tid;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                         mc->data, (mc->szB + 0UL));
         }

         VG_(HT_remove_at_Iter)(MC_(malloc_list));
         die_and_free_mem(tid, mc, mp->rzB);
      }
   }
}
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
                   " autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed,
                   auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
      if (auto_free && !metapool)
         VG_(tool_panic)("Inappropriate use of mempool:"
                         " an auto free pool must be a meta pool. Aborting\n");
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->auto_free  = auto_free;
   mp->metapool   = metapool;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}
static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %u pools, %u chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%u chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u: %lu bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0U, 0U, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We nevertheless mark the redzones noaccess, to catch some
         // bugs if the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}
void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}
static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
   if (mc) {
      xta->nbytes = mc->szB;
      xta->nblocks = 1;
      *ec_alloc = MC_(allocated_at)(mc);
   } else
      xta->nblocks = 0;
}

void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(MC_(malloc_list));

   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}
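/* Illustrative usage (not part of the original file), assuming the usual
   core options: the full xtree memory profiling that feeds the report
   above is enabled with something like

      valgrind --tool=memcheck --xtree-memory=full \
               --xtree-memory-file=xtmemory.kcg.%p ./myprog

   The file name shown is only an example. */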
/*------------------------------------------------------------*/
/*--- Statistics printing                                   ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/