Restore change unintentionally killed during merge.
[glibc.git] / malloc / hooks.c
blob 8a94fd0be896b7c308a2999d174382c307bfb154
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* $Id$ */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
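/* Usage sketch (illustrative): the value described above is normally taken
   from the MALLOC_CHECK_ environment variable, parsed during initialization
   (presumably in ptmalloc_init()), so the debugging hooks can be enabled
   without recompiling:

       MALLOC_CHECK_=1 ./a.out    print a message when corruption is detected
       MALLOC_CHECK_=2 ./a.out    call abort() when corruption is detected  */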
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled at the request of the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe. */
static int disallow_malloc_check;
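/* The four cases, restated as a quick-reference table (an informal summary
   of the comment above, not additional behaviour):

     saved heap built with checking?   MALLOC_CHECK_ set?   outcome
     yes                               yes                  __malloc_check_init() re-installs the hooks
     yes                               no                   heap reused without checking (safe)
     no                                yes                  checking suppressed; hooks reset to 0 if already installed
     no                                no                   nothing to do  */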
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    malloc_printerr (5, "malloc: using debugging hooks", NULL);
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
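/* Layout sketch for a checked allocation of user size sz, as produced by
   mem2mem_check() below (illustrative only; the pointer value 0x8049630 is
   just an example):

     mem[0] .. mem[sz-1]   user data
     mem[sz]               MAGICBYTE(p), with p == mem2chunk(mem)
     mem[sz+1] .. top      slack bytes forming a chain of hops (0xFF or the
                           remaining distance) that lets mem2chunk_check()
                           walk from the end of the chunk back to mem[sz]

   For example, with p == 0x8049630:
     MAGICBYTE(p) == ((0x8049630 >> 3) ^ (0x8049630 >> 11)) & 0xFF
                  == 0xC6 ^ 0x92 == 0x54,
   so 0x54 is written at mem[sz] and verified again before the chunk is
   freed or resized.  */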
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if( (char*)p>=mp_.sbrk_base &&
      (char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)),
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)mp_.sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
     t == initial_top(&main_arena)) return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  /* sz+1: reserve room for the trailing magic byte written by
     mem2mem_check.  */
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */
static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}
static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}
static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0. */
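/* Usage sketch (illustrative; the public entry points are assumed to surface
   as malloc_get_state()/malloc_set_state() in <malloc.h>, and
   save_state_somewhere(), saved_state and handle_incompatible_state() are
   made-up placeholders for whatever dump/undump mechanism the application
   uses, e.g. Emacs' unexec):

     void *state = malloc_get_state ();     // snapshot the malloc variables
     if (state != NULL)
       {
         save_state_somewhere (state);      // together with the heap pages
         free (state);                      // the snapshot itself is heap-allocated
       }

     // ... in the restarted program, after the heap pages are back in place:
     if (malloc_set_state (saved_state) != 0)
       handle_incompatible_state ();        // bad magic or newer major version  */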
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
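/* Encoding sketch: with the scheme above, version 0.2 is stored as 0x002.
   public_sET_STATe() below only rejects a saved state whose major number
   (version & ~0xff) is greater than ours, so states written by an older
   minor revision remain acceptable.  */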
struct malloc_save_state {
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char* sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it. */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */