/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* $Id$ */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

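/* Illustrative note (added commentary, not in the original source):
   check_action is normally derived from the MALLOC_CHECK_ environment
   variable when the checking hooks are enabled, e.g.

     MALLOC_CHECK_=1 ./prog      print a message on a corrupt pointer
     MALLOC_CHECK_=2 ./prog      call abort() instead

   The authoritative parsing of MALLOC_CHECK_ lives in ptmalloc_init().  */
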
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    malloc_printerr (5, "malloc: using debugging hooks", NULL);
}

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )

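/* Worked example (added commentary, assuming a 32-bit chunk address):
   for p == 0x804a008,

     (size_t)p >> 3   == 0x01009401
     (size_t)p >> 11  == 0x00010094
     xor              == 0x01019495,  & 0xFF  ->  magic byte 0x95

   Deriving the byte from the chunk's own address means a fixed
   overwriting value is unlikely to match at every location.  */
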
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}

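/* Layout sketch (added commentary) for a chunk whose usable area holds
   USIZE bytes and a request of sz bytes:

     m_ptr[0 .. sz-1]    user data
     m_ptr[sz]           MAGICBYTE(p)
     m_ptr[top]          skip byte: either 0xFF, or the remaining
                         distance back to m_ptr[sz] if it is < 0x100
     ...                 further skip bytes every 0xFF positions

   mem2chunk_check() below starts at the top of the chunk and follows
   the skip bytes down to the magic byte; an overrun that clobbers
   m_ptr[sz] or one of the skip bytes makes that walk fail.  */
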
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}

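/* Note (added commentary): the final `^= 0xFF' above deliberately
   destroys the magic byte once a chunk has been validated for
   free()/realloc(), so handing the same pointer to free() a second
   time fails the scan; this is how most double frees are caught.  */
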
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

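/* Note (added commentary): a return of 0 means the top chunk was sane
   or has been rebuilt from fresh MORECORE space; -1 means recovery
   failed, and the callers below then treat the request as out of
   memory.  */
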
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  /* Request one extra byte so mem2mem_check has room for the magic byte. */
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}

static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}

#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */

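/* A minimal usage sketch (added commentary; save_heap, restore_heap and
   handle_incompatible_state are hypothetical application routines, not
   part of this file):

     void *state = malloc_get_state ();     exported as public_gET_STATe
     save_heap ();                          dump the heap contents somehow
        ... later, in the restarted process ...
     restore_heap ();                       bring the heap image back first
     if (malloc_set_state (state) != 0)     exported as public_sET_STATe
       handle_incompatible_state ();
     free (state);
*/
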
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */

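/* Note (added commentary): the magic constant is the ASCII bytes
   'D' 'L' 'E' 'A' (0x44 0x4c 0x45 0x41), and the version encodes
   major 0, minor 2; public_sET_STATe below rejects a saved state
   whose major version is newer than its own.  */
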
struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};

Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */