[glibc.git] / malloc / hooks.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2006, 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void*
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static void*
realloc_hook_ini(void* ptr, size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;
/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
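/* Worked example (illustrative; the address below is hypothetical): the
   check byte is derived from the chunk address itself, so neighbouring
   chunks normally carry different magic values and a stray write is
   unlikely to reproduce the right one.  For a chunk at p == 0x602010:
     (0x602010 >> 3)  == 0xC0402
     (0x602010 >> 11) == 0x00C04
     (0xC0402 ^ 0xC04) & 0xFF == 0x06
   so MAGICBYTE(p) == 0x06 for that chunk.  */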
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static void*
internal_function
mem2mem_check(void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (void*)m_ptr;
}
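/* Worked example (illustrative; assumes a 64-bit build with SIZE_SZ == 8
   and a non-mmapped chunk).  For a malloc_check(10) request the arena
   hands back a chunk of size 32, so mem2mem_check is called with sz == 10
   and starts the loop at i == 32 - 8 - 1 == 23:
     - 23 - 10 == 13 < 0x100, so m_ptr[23] = 13 and the loop stops;
     - m_ptr[10] = MAGICBYTE(p).
   mem2chunk_check below undoes this: it scans backwards from the last
   usable byte, subtracting each stored distance (here 13) until it lands
   on the magic byte at m_ptr[10], which marks the end of the user's
   data.  */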
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
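/* Note (descriptive): once the magic byte has been located it is XORed
   with 0xFF, i.e. "consumed".  A second free() or realloc() of the same
   pointer will therefore fail the scan above and be reported as an
   invalid pointer, which is how the one-byte-per-chunk scheme catches
   most double frees.  realloc_check restores the byte when the old chunk
   remains in use.  */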
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static void*
malloc_check(size_t sz, const void *caller)
{
  void *victim;

  if (sz+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
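/* Descriptive note: the request is enlarged by one byte so that
   mem2mem_check always has room for the trailing magic byte, and the
   "sz+1 == 0" test rejects a request of (size_t)-1, whose +1 would
   otherwise wrap around to zero.  realloc_check and memalign_check below
   follow the same pattern.  */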
static void
free_check(void* mem, const void *caller)
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
  _int_free(&main_arena, p, 1);
  (void)mutex_unlock(&main_arena.mutex);
}
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
{
  void* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
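/* Usage sketch (illustrative only; `write_dump', `map_dump_back_in' and
   the error handling are hypothetical application code, not part of this
   file).  public_gET_STATe/public_sET_STATe are exposed to applications
   as malloc_get_state()/malloc_set_state() via <malloc.h>:

     void *ms = malloc_get_state ();     // snapshot the allocator variables
     write_dump (ms);                    // save heap contents + state block

   ...and in the re-started image, once the old heap contents are back in
   place:

     map_dump_back_in ();
     if (malloc_set_state (ms) != 0)     // restore the allocator variables
       abort ();
     free (ms);                          // the state block can be freed

   Only the allocator's bookkeeping is recorded; the heap contents
   themselves must be saved and restored by other means, as noted in the
   comment above.  */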
struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
void*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
  ms->max_total_mem = 0;
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  ms->max_fast = get_max_fast();
#ifdef PER_THREAD
  ms->arena_test = mp_.arena_test;
  ms->arena_max = mp_.arena_max;
  ms->narenas = narenas;
#endif
  (void)mutex_unlock(&main_arena.mutex);
  return (void*)ms;
}
int
public_sET_STATe(void* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  if (ms->version >= 4)
    set_max_fast(ms->max_fast);
  else
    set_max_fast(64); /* 64 used to be the value we always used. */
  for (i=0; i<NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  if (ms->version >= 4) {
#ifdef PER_THREAD
    mp_.arena_test = ms->arena_test;
    mp_.arena_max = ms->arena_max;
    narenas = ms->narenas;
#endif
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */