[glibc.git] / malloc / hooks.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2009, 2011, 2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
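/* check_action is normally driven by the MALLOC_CHECK_ environment
   variable: ptmalloc_init (in arena.c) parses its value and calls
   __malloc_check_init below when the requested action is non-zero.  */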
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void*
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return __libc_malloc(sz);
}

static void*
realloc_hook_ini(void* ptr, size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return __libc_realloc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return __libc_memalign(alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
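/* Layout of a checked allocation, as produced by mem2mem_check below:
   the user gets sz usable bytes, the byte at offset sz holds
   MAGICBYTE(p), and between there and the real end of the chunk a
   marker byte is planted every 0xFF positions.  Each marker records
   the distance to hop backwards, so mem2chunk_check can walk from the
   end of the chunk to the magic byte and validate the pointer.  */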
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static void*
internal_function
mem2mem_check(void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char* m_ptr = ptr;
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (void*)m_ptr;
}
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
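/* Note that a successful check flips the magic byte (the ^= 0xFF
   above), so passing the same pointer to free or realloc a second
   time no longer matches MAGICBYTE(p) and is reported as invalid.
   realloc_check flips the byte back when the old chunk ends up being
   reused.  */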
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static void*
malloc_check(size_t sz, const void *caller)
{
  void *victim;

  if (sz+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
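/* The extra byte requested above (sz+1) leaves room for the magic byte
   written by mem2mem_check; the sz+1 == 0 test catches a request of
   SIZE_MAX bytes, which would otherwise wrap around to a zero-sized
   allocation.  */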
static void
free_check(void* mem, const void *caller)
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
  _int_free(&main_arena, p, 1);
  (void)mutex_unlock(&main_arena.mutex);
}
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here. */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
{
  void* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0. */
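/* A rough sketch of the intended use (saving and restoring the heap
   contents themselves is up to the caller, e.g. the Emacs dumper):

     void *state = malloc_get_state ();
     ...write the heap pages and the state structure to the dump...
     free (state);

   and in the restarted process, once the heap pages are back in place:

     malloc_set_state (state_read_from_dump);
 */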
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
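/* In the saved state, av[0] and av[1] are unused (historical), av[2]
   holds the top chunk and av[3] is reserved; for every other bin i,
   av[2*i+2] and av[2*i+3] record the first and last chunk of the bin
   (both 0 for an empty bin) -- see __malloc_get_state below.  */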
void*
__malloc_get_state(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
  ms->max_total_mem = 0;
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  ms->max_fast = get_max_fast();
#ifdef PER_THREAD
  ms->arena_test = mp_.arena_test;
  ms->arena_max = mp_.arena_max;
  ms->narenas = narenas;
#endif
  (void)mutex_unlock(&main_arena.mutex);
  return (void*)ms;
}
int
__malloc_set_state(void* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks. */
  clear_fastchunks(&main_arena);
  if (ms->version >= 4)
    set_max_fast(ms->max_fast);
  else
    set_max_fast(64); /* 64 used to be the value we always used. */
  for (i=0; i<NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct. */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks. */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks. */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields. */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it. */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  if (ms->version >= 4) {
#ifdef PER_THREAD
    mp_.arena_test = ms->arena_test;
    mp_.arena_max = ms->arena_max;
    narenas = ms->narenas;
#endif
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */