[glibc.git] / malloc / hooks.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* $Id$ */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */
static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    malloc_printerr (5, "malloc: using debugging hooks", NULL);
}
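
/* Illustration (added; not part of the original file): a minimal sketch of
   how a program ends up in these checking hooks.  It assumes a glibc of
   this era, where setting MALLOC_CHECK_ in the environment makes
   ptmalloc_init() call __malloc_check_init(), so malloc/free/realloc/
   memalign are then routed through the *_check wrappers defined below.

     // check_demo.c -- run as:  MALLOC_CHECK_=2 ./check_demo
     #include <stdlib.h>
     #include <string.h>

     int main (void)
     {
       char *p = malloc (16);
       memset (p, 'A', 17);     // one-byte overrun clobbers the magic byte
       free (p);                // free_check() -> mem2chunk_check() fails;
                                // with MALLOC_CHECK_=2 the process aborts
       return 0;
     }
*/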
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
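
/* Worked example (added for illustration; not in the original source).
   On a 32-bit build where SIZE_SZ is 4, malloc_check(10, ...) requests
   sz+1 = 11 bytes, which typically lands in a chunk with chunksize(p) == 16.
   The loop above then starts at i = 16 - (4+1) = 11; since 11 > 10 and
   11-10 = 1 < 0x100 it stores m_ptr[11] = 1 and stops, and finally
   m_ptr[10] = MAGICBYTE(p).  For larger slack the loop first writes 0xFF
   "hop" bytes every 0xFF positions, so mem2chunk_check() below can walk
   back from the end of the chunk to the magic byte in steps given by the
   stored bytes.  MAGICBYTE(p) itself is just a hash of the chunk address,
   e.g. ((0x804a000 >> 3) ^ (0x804a000 >> 11)) & 0xFF for p == 0x804a000. */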
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem, unsigned char **magic_p)
#else
mem2chunk_check(mem, magic_p) Void_t* mem; unsigned char **magic_p;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
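
/* Note (added for illustration; not in the original source): inverting the
   magic byte above is what makes double frees detectable.  After the first
   successful check the stored byte no longer matches MAGICBYTE(p), so a
   second mem2chunk_check() on the same pointer returns NULL.  Sketch of
   what a user would see with checking active:

     char *p = malloc (8);
     free (p);     // magic byte verified, then inverted
     free (p);     // mem2chunk_check() fails -> "free(): invalid pointer"
*/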
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      MALLOC_FAILURE_ACTION;
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  if (sz+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */
static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}
static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}
static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
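
/* Usage sketch (added for illustration; not part of the original file):
   a program that saves and later restores its heap -- the Emacs-style
   `dumping' mentioned above -- would pair these calls with its own way of
   preserving the heap contents, roughly:

     #include <malloc.h>
     #include <stdlib.h>

     // At dump time: snapshot malloc's internal variables.  save_blob()
     // is a hypothetical helper that records the opaque state alongside
     // the saved heap image.
     void dump_heap_state (void)
     {
       void *state = malloc_get_state ();
       if (state)
         {
           save_blob (state, malloc_usable_size (state));
           free (state);
         }
     }

     // At startup, after the heap contents have been mapped back in:
     void restore_heap_state (void *state)
     {
       if (malloc_set_state (state) != 0)   // wrong magic, or major version too high
         abort ();
     }
*/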
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */