/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
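
/* For illustration: the check_action value is normally derived from the
   MALLOC_CHECK_ environment variable (MALLOC_CHECK_=1 prints a
   diagnostic, MALLOC_CHECK_=2 calls abort()).  The sketch below shows
   that dispatch in isolation; it uses hypothetical names and is not the
   allocator's own code. */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void example_report_corruption(int action, const char *msg)
{
  if (action & 1) fprintf(stderr, "malloc: %s\n", msg);  /* print (1) */
  if (action & 2) abort();                               /* abort (2) */
  /* action == 0: silently ignore */
}
#endif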

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
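
/* The same swap-the-hook pattern can be driven from application code.
   An illustrative sketch using the public __malloc_hook interface
   (my_malloc_hook, old_malloc_hook, and example_install are
   hypothetical names, not part of this file):  */
#if 0
#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook)(size_t, const void *);

static void *my_malloc_hook(size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;   /* uninstall ourselves to avoid recursion */
  result = malloc(size);
  old_malloc_hook = __malloc_hook;   /* the underlying hook may have changed */
  fprintf(stderr, "malloc(%u) = %p\n", (unsigned int)size, result);
  __malloc_hook = my_malloc_hook;    /* reinstall ourselves */
  (void)caller;                      /* return address of the malloc call */
  return result;
}

static void example_install(void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}
#endif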

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    malloc_printerr (5, "malloc: using debugging hooks", NULL);
}

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
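
/* Because the magic byte is derived from the chunk address itself, a
   value stored for one chunk will, with high probability, fail to
   verify at any other address.  An isolated sketch of the computation
   (example_magic is a hypothetical stand-in for MAGICBYTE):  */
#if 0
#include <stddef.h>

static unsigned char example_magic(const void *p)
{
  /* Same mixing as MAGICBYTE: xor two shifted copies of the address. */
  return (unsigned char)((((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF);
}
#endif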

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  /* Fill the slack between the requested size and the real chunk end
     with distance bytes: full 0xFF steps, then the final remainder. */
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
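
/* The bytes written above form a distance chain: each 0xFF byte means
   "the magic byte is at least 255 bytes further back", and the last
   byte holds the remaining distance i-sz.  mem2chunk_check() below
   walks this chain in reverse.  A standalone sketch of that decoding
   (hypothetical helper, illustration only):  */
#if 0
#include <stddef.h>

/* Return the offset of the magic byte, or (size_t)-1 if the chain is
   corrupt (e.g. an overrun clobbered a distance byte). */
static size_t example_walk_back(const unsigned char *buf, size_t last,
                                unsigned char magic)
{
  size_t pos = last;
  unsigned char c;
  while ((c = buf[pos]) != magic) {
    if (c == 0 || pos < c) return (size_t)-1;
    pos -= c;
  }
  return pos;
}
#endif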

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if( (char*)p>=mp_.sbrk_base &&
      (char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)),
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)mp_.sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
     t == initial_top(&main_arena)) return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}

static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}

#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
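
/* A typical save/restore sequence, sketched with error handling elided.
   These entry points are exported to applications as malloc_get_state()
   and malloc_set_state(); example_save_restore is a hypothetical
   caller:  */
#if 0
#include <malloc.h>
#include <stdlib.h>

void example_save_restore(void)
{
  void *state = malloc_get_state();  /* snapshot of the malloc variables */
  /* ... heap contents saved/restored by some external mechanism ... */
  if (state != NULL) {
    malloc_set_state(state);         /* reinstate the recorded state */
    free(state);                     /* the snapshot itself is malloc()ed */
  }
}
#endif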

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
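
/* Example of the encoding: a snapshot recorded as version 0x102 would
   be major 1, minor 2, while the current version 0x002 is major 0,
   minor 2.  public_sET_STATe() below masks off the minor byte, so a
   snapshot is accepted whenever its major version does not exceed the
   current one.  That test in isolation (hypothetical helper):  */
#if 0
static int example_state_compatible(long recorded_version)
{
  return (recorded_version & ~0xffl) <= (MALLOC_STATE_VERSION & ~0xffl);
}
#endif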

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};

Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      /* The heap lacks the magic bytes, so checking must be turned off. */
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}