/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* $Id$ */

#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  The value is tested as a bit mask below,
   so setting both bits (3) prints a message and then aborts.  */
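/* Illustrative sketch (editorial, not part of the build): how a
   bit-mask check_action value drives the response to a detected
   corrupt pointer.  The helper name `report_corrupt' and its message
   argument are hypothetical; the real hooks below inline this logic.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void
report_corrupt (int action, const char *msg)
{
  if (action & 1)               /* bit 0: print a diagnostic */
    fprintf (stderr, "%s", msg);
  if (action & 2)               /* bit 1: terminate the process */
    abort ();
  /* action == 0: silently ignore the corruption */
}
#endif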
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
static int check_action = DEFAULT_CHECK_ACTION;

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
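/* Worked example (editorial): MAGICBYTE mixes two shifted copies of
   the chunk address so that nearby chunks get different markers.  For
   a hypothetical chunk at 0x08058008:
     0x08058008 >> 3  == 0x0100b001
     0x08058008 >> 11 == 0x000100b0
     XOR              == 0x0101b0b1, and & 0xFF leaves 0xb1.  */
#if 0
#include <stdio.h>

int
main (void)
{
  size_t p = 0x08058008;                /* hypothetical chunk address */
  printf ("magic = 0x%02x\n",
          (unsigned) (((p >> 3) ^ (p >> 11)) & 0xFF));  /* prints 0xb1 */
  return 0;
}
#endif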
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
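/* Worked example of the detector bytes (editorial; assumes SIZE_SZ == 4
   and a non-mmapped chunk of 24 bytes serving a request of sz = 16):

     m_ptr[0..15]  user data
     m_ptr[16]     MAGICBYTE(p)     <- written last, checked on free
     m_ptr[19]     19 - 16 = 3      <- distance back to the magic byte

   The loop starts at i = 24 - (SIZE_SZ+1) = 19; since the slack is
   under 0x100 it stores the distance directly.  Larger slack is
   bridged with a chain of 0xFF hop bytes, which mem2chunk_check
   follows downward until it lands on the magic byte.  */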
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if( (char*)p>=mp_.sbrk_base &&
      (char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)),
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)mp_.sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}
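/* Editorial note on the `^= 0xFF' above: once a pointer validates, its
   magic byte is inverted in place.  A second free() of the same pointer
   re-runs the walk, no longer finds MAGICBYTE(p) at the stored offset,
   and in virtually all cases trips the `sz < c+2*SIZE_SZ' guard, so
   mem2chunk_check returns NULL and the double free is reported instead
   of corrupting the arena.  */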
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
     t == initial_top(&main_arena)) return 0;

  if(check_action & 1)
    fprintf(stderr, "malloc: top chunk is corrupt\n");
  if(check_action & 2)
    abort();

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
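/* Worked example of the recovery arithmetic (editorial; illustrative
   figures): with pagesz = 0x1000, MALLOC_ALIGNMENT = 8, SIZE_SZ = 4,
   MINSIZE = 16, top_pad = 0, and MORECORE(0) returning brk = 0x0805a004,
   chunk2mem(brk) = 0x0805a00c is misaligned by 4, so front_misalign
   becomes 8 - 4 = 4.  Then
     sbrk_size  = 4 + 0 + 16 = 0x14
     sbrk_size += 0x1000 - ((0x0805a004 + 0x14) & 0xfff) = 0xfe8
   giving sbrk_size = 0xffc, so brk + sbrk_size = 0x0805b000 lands
   exactly on a page boundary before the new top chunk is installed at
   brk + front_misalign.  */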
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);
    if(check_action & 1)
      fprintf(stderr, "free(): invalid pointer %p!\n", mem);
    if(check_action & 2)
      abort();
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    if(check_action & 1)
      fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
    if(check_action & 2)
      abort();
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory.  Guard against a failed allocation before
         touching the new chunk. */
    if(newmem) {
      mchunkptr newp = mem2chunk(newmem);
      nb = chunksize(newp);
      if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
        memset((char*)oldmem + 2*sizeof(mbinptr), 0,
               oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
      } else if(nb > oldsize+SIZE_SZ) {
        memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
               0, nb - (oldsize+SIZE_SZ));
      }
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
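/* Usage sketch (editorial, outside this file): an Emacs-style dumper
   would pair these calls with its own heap save/restore.  The
   save_blob_somehow/load_blob_somehow helpers are hypothetical
   stand-ins for whatever persistence the application uses.  */
#if 0
#include <malloc.h>
#include <stdlib.h>

extern void  save_blob_somehow (void *blob);  /* hypothetical */
extern void *load_blob_somehow (void);        /* hypothetical */

void
dump_malloc_state (void)
{
  void *state = malloc_get_state ();  /* opaque, malloc()ed snapshot */
  if (state) {
    save_blob_somehow (state);
    free (state);                     /* the snapshot may be freed */
  }
}

void
restore_malloc_state (void)
{
  /* The dumped heap contents must already be back in place;
     malloc_set_state only restores the allocator's bookkeeping.  */
  void *state = load_blob_somehow ();
  if (state && malloc_set_state (state) == 0)
    free (state);
}
#endif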
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
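/* Worked example (editorial): version 0.2 encodes as 0*0x100 + 2 = 0x002.
   The compatibility test in public_sET_STATe, (version & ~0xffl), strips
   the minor part, so any 0.x state still loads while a 1.x state is
   rejected.  */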
struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */