/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */
static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller) size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;
/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    malloc_printerr (5, "malloc: using debugging hooks", NULL);
}
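
/* Illustrative sketch (not part of the library): with the checking hooks
   installed -- typically by running a program with the MALLOC_CHECK_
   environment variable set, e.g. `MALLOC_CHECK_=2 ./app' -- a double
   free is detected instead of silently corrupting the heap: the first
   free_check flips the magic byte, so the second lookup fails.
   Disabled; for exposition only. */
#if 0
#include <stdlib.h>
#include <string.h>

int
main(void)
{
  char *p = malloc(16);   /* goes through malloc_check */
  strcpy(p, "hello");
  free(p);                /* free_check validates p, then flips its magic byte */
  free(p);                /* fails the magic-byte walk: "free(): invalid
                             pointer" is printed, or abort() is called,
                             depending on check_action */
  return 0;
}
#endif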
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
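
/* Illustrative sketch (not part of the library): MAGICBYTE hashes the
   chunk's own address, so a pointer that is stale or shifted almost
   always carries the wrong byte.  Worked example for a hypothetical
   chunk at 0x8049680:
     0x8049680 >> 3  = 0x10092d0
     0x8049680 >> 11 = 0x0010092
     xor = 0x1019242, & 0xff = 0x42.
   Disabled; for exposition only. */
#if 0
#include <stdio.h>

int
main(void)
{
  unsigned long p = 0x8049680UL;    /* hypothetical chunk address */
  unsigned char magic = ((p >> 3) ^ (p >> 11)) & 0xFF;
  printf("magic byte for chunk %#lx: %#x\n", p, magic);  /* prints 0x42 */
  return 0;
}
#endif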
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
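
/* Illustrative sketch (not part of the library): a standalone model of
   the trail that mem2mem_check writes between the end of the chunk and
   the magic byte at m_ptr[sz].  Each trail byte records the distance to
   the next byte below it (0xFF for full hops), so the walk in
   mem2chunk_check can step back to the magic byte in at most
   chunksize/255 reads.  Chunk size and magic value are hypothetical.
   Disabled; for exposition only. */
#if 0
#include <stdio.h>

int
main(void)
{
  unsigned char chunk[600];         /* hypothetical instrumented area */
  size_t top = sizeof(chunk) - 1;   /* last byte mem2mem_check touches */
  size_t sz = 10;                   /* requested allocation size */
  size_t i;

  /* The same stepping as the loop in mem2mem_check above. */
  for (i = top; i > sz; i -= 0xFF) {
    if (i - sz < 0x100) {
      chunk[i] = (unsigned char)(i - sz);
      break;
    }
    chunk[i] = 0xFF;
  }
  chunk[sz] = 0x42;                 /* stands in for MAGICBYTE(p) */

  /* Walk back down the trail, as mem2chunk_check does. */
  for (i = top; chunk[i] != 0x42; i -= chunk[i])
    ;
  printf("magic byte found at offset %lu (requested size %lu)\n",
         (unsigned long)i, (unsigned long)sz);
  return 0;
}
#endif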
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem, unsigned char **magic_p)
#else
mem2chunk_check(mem, magic_p) Void_t* mem; unsigned char **magic_p;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
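
/* Illustrative sketch (not part of the library): the magic-byte walk
   above doubles as an overrun detector.  A write even one byte past the
   requested size clobbers either the magic byte or a trail byte, so the
   walk either misses the magic byte or takes an impossible step and
   bails out (the real test is `c<=0 || sz<(c+2*SIZE_SZ)'; this model
   simplifies it).  Sizes and the magic value are hypothetical.
   Disabled; for exposition only. */
#if 0
#include <stdio.h>

int
main(void)
{
  unsigned char chunk[64];
  size_t top = sizeof(chunk) - 1;   /* a single trail byte suffices here */
  size_t sz = 10;
  size_t i;
  unsigned char c;

  chunk[top] = (unsigned char)(top - sz);   /* trail byte: 53 */
  chunk[sz] = 0x42;                         /* stands in for MAGICBYTE(p) */

  chunk[sz] = 0x00;                         /* simulate a one-byte overrun */

  for (i = top; (c = chunk[i]) != 0x42; i -= c) {
    if (c == 0 || i < c) {                  /* impossible step: corruption */
      printf("invalid pointer detected\n");
      return 1;
    }
  }
  printf("pointer ok\n");
  return 0;
}
#endif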
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) {
    MALLOC_FAILURE_ACTION;
    return -1;
  }
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
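
/* Illustrative sketch (not part of the library): the recovery path above
   sizes its sbrk request so the new top chunk is aligned and the break
   ends on a page boundary.  Worked example with hypothetical 32-bit
   values standing in for MORECORE(0), SIZE_SZ, MALLOC_ALIGNMENT,
   mp_.top_pad and MINSIZE.  Disabled; for exposition only. */
#if 0
#include <stdio.h>

int
main(void)
{
  unsigned long brk = 0x8050004, size_sz = 4, alignment = 8;
  unsigned long pagesz = 0x1000, top_pad = 0x20000, minsize = 0x10;
  unsigned long front_misalign, sbrk_size;

  /* chunk2mem(brk) = brk + 2*SIZE_SZ; here 0x805000c, misaligned by 4. */
  front_misalign = (brk + 2*size_sz) & (alignment - 1);
  if (front_misalign > 0)
    front_misalign = alignment - front_misalign;          /* -> 4 */
  sbrk_size = front_misalign + top_pad + minsize;         /* 0x20014 */
  sbrk_size += pagesz - ((brk + sbrk_size) & (pagesz - 1));

  /* Prints 0x20ffc; 0x8050004 + 0x20ffc == 0x8071000, page-aligned. */
  printf("sbrk request: %#lx\n", sbrk_size);
  return 0;
}
#endif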
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  if (sz+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem) {
      newp = mem2chunk(newmem);
      nb = chunksize(newp);
      if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
        memset((char*)oldmem + 2*sizeof(mbinptr), 0,
               oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
      } else if(nb > oldsize+SIZE_SZ) {
        memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
               0, nb - (oldsize+SIZE_SZ));
      }
    }
#endif
#if HAVE_MMAP
  }
#endif

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS

# if USE___THREAD || (defined USE_TLS && !defined SHARED)
/* These routines are never needed in this configuration.  */
#  define NO_STARTER
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */
static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}
static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}
static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
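
/* Illustrative sketch (not part of the library): typical use of the
   get/set state pair by a dumping application.  save_state_blob() and
   map_state_blob() are hypothetical application routines that persist
   the opaque state block alongside the dumped heap image.  Disabled;
   for exposition only. */
#if 0
#include <malloc.h>
#include <stdlib.h>

extern void  save_state_blob(void *state);
extern void *map_state_blob(void);

void
before_dump(void)                       /* e.g. before Emacs-style dumping */
{
  void *state = malloc_get_state();     /* opaque, malloc()ed block */
  if (state != NULL) {
    save_state_blob(state);
    free(state);                        /* may be free()d after use */
  }
}

void
after_restore(void)                     /* early in the restored process */
{
  void *state = map_state_blob();
  if (state == NULL || malloc_set_state(state) != 0)
    abort();                            /* bad magic, or version too new */
}
#endif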
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct. */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks. */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks. */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it. */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}