1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If not,
18 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
23 /* What to do if the standard debugging hooks are in place and a
24 corrupt pointer is detected: do nothing (0), print an error message
25 (1), or call abort() (2). */
27 /* Hooks for debugging versions. The initial hooks just call the
28 initialization routine, then do the normal work. */
32 malloc_hook_ini(size_t sz
, const __malloc_ptr_t caller
)
34 malloc_hook_ini(sz
, caller
)
35 size_t sz
; const __malloc_ptr_t caller
;
40 return public_mALLOc(sz
);
45 realloc_hook_ini(Void_t
* ptr
, size_t sz
, const __malloc_ptr_t caller
)
47 realloc_hook_ini(ptr
, sz
, caller
)
48 Void_t
* ptr
; size_t sz
; const __malloc_ptr_t caller
;
52 __realloc_hook
= NULL
;
54 return public_rEALLOc(ptr
, sz
);
59 memalign_hook_ini(size_t alignment
, size_t sz
, const __malloc_ptr_t caller
)
61 memalign_hook_ini(alignment
, sz
, caller
)
62 size_t alignment
; size_t sz
; const __malloc_ptr_t caller
;
65 __memalign_hook
= NULL
;
67 return public_mEMALIGn(alignment
, sz
);
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
88 /* Activate a standard set of debugging hooks. */
92 if (disallow_malloc_check
) {
93 disallow_malloc_check
= 0;
96 using_malloc_checking
= 1;
97 __malloc_hook
= malloc_check
;
98 __free_hook
= free_check
;
99 __realloc_hook
= realloc_check
;
100 __memalign_hook
= memalign_check
;
102 malloc_printerr (5, "malloc: using debugging hooks", NULL
);
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

/* Per-chunk magic byte derived from the chunk address: two shifted
   copies of the address XORed together, truncated to one byte.  */
#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
112 /* Instrument a chunk with overrun detector byte(s) and convert it
113 into a user pointer with requested size sz. */
118 mem2mem_check(Void_t
*ptr
, size_t sz
)
120 mem2mem_check(ptr
, sz
) Void_t
*ptr
; size_t sz
;
124 unsigned char* m_ptr
= (unsigned char*)BOUNDED_N(ptr
, sz
);
130 for(i
= chunksize(p
) - (chunk_is_mmapped(p
) ? 2*SIZE_SZ
+1 : SIZE_SZ
+1);
134 m_ptr
[i
] = (unsigned char)(i
-sz
);
139 m_ptr
[sz
] = MAGICBYTE(p
);
140 return (Void_t
*)m_ptr
;
143 /* Convert a pointer to be free()d or realloc()ed to a valid chunk
144 pointer. If the provided pointer is not valid, return NULL. */
149 mem2chunk_check(Void_t
* mem
)
151 mem2chunk_check(mem
) Void_t
* mem
;
155 INTERNAL_SIZE_T sz
, c
;
158 if(!aligned_OK(mem
)) return NULL
;
160 if (!chunk_is_mmapped(p
)) {
161 /* Must be a chunk in conventional heap memory. */
162 int contig
= contiguous(&main_arena
);
165 ((char*)p
<mp_
.sbrk_base
||
166 ((char*)p
+ sz
)>=(mp_
.sbrk_base
+main_arena
.system_mem
) )) ||
167 sz
<MINSIZE
|| sz
&MALLOC_ALIGN_MASK
|| !inuse(p
) ||
168 ( !prev_inuse(p
) && (p
->prev_size
&MALLOC_ALIGN_MASK
||
169 (contig
&& (char*)prev_chunk(p
)<mp_
.sbrk_base
) ||
170 next_chunk(prev_chunk(p
))!=p
) ))
172 magic
= MAGICBYTE(p
);
173 for(sz
+= SIZE_SZ
-1; (c
= ((unsigned char*)p
)[sz
]) != magic
; sz
-= c
) {
174 if(c
<=0 || sz
<(c
+2*SIZE_SZ
)) return NULL
;
176 ((unsigned char*)p
)[sz
] ^= 0xFF;
178 unsigned long offset
, page_mask
= malloc_getpagesize
-1;
180 /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
181 alignment relative to the beginning of a page. Check this
183 offset
= (unsigned long)mem
& page_mask
;
184 if((offset
!=MALLOC_ALIGNMENT
&& offset
!=0 && offset
!=0x10 &&
185 offset
!=0x20 && offset
!=0x40 && offset
!=0x80 && offset
!=0x100 &&
186 offset
!=0x200 && offset
!=0x400 && offset
!=0x800 && offset
!=0x1000 &&
188 !chunk_is_mmapped(p
) || (p
->size
& PREV_INUSE
) ||
189 ( (((unsigned long)p
- p
->prev_size
) & page_mask
) != 0 ) ||
190 ( (sz
= chunksize(p
)), ((p
->prev_size
+ sz
) & page_mask
) != 0 ) )
192 magic
= MAGICBYTE(p
);
193 for(sz
-= 1; (c
= ((unsigned char*)p
)[sz
]) != magic
; sz
-= c
) {
194 if(c
<=0 || sz
<(c
+2*SIZE_SZ
)) return NULL
;
196 ((unsigned char*)p
)[sz
] ^= 0xFF;
201 /* Check for corruption of the top chunk, and try to recover if
212 mchunkptr t
= top(&main_arena
);
213 char* brk
, * new_brk
;
214 INTERNAL_SIZE_T front_misalign
, sbrk_size
;
215 unsigned long pagesz
= malloc_getpagesize
;
217 if (t
== initial_top(&main_arena
) ||
218 (!chunk_is_mmapped(t
) &&
219 chunksize(t
)>=MINSIZE
&&
221 (!contiguous(&main_arena
) ||
222 (char*)t
+ chunksize(t
) == mp_
.sbrk_base
+ main_arena
.system_mem
)))
225 malloc_printerr (check_action
, "malloc: top chunk is corrupt", t
);
227 /* Try to set up a new top chunk. */
229 front_misalign
= (unsigned long)chunk2mem(brk
) & MALLOC_ALIGN_MASK
;
230 if (front_misalign
> 0)
231 front_misalign
= MALLOC_ALIGNMENT
- front_misalign
;
232 sbrk_size
= front_misalign
+ mp_
.top_pad
+ MINSIZE
;
233 sbrk_size
+= pagesz
- ((unsigned long)(brk
+ sbrk_size
) & (pagesz
- 1));
234 new_brk
= (char*)(MORECORE (sbrk_size
));
235 if (new_brk
== (char*)(MORECORE_FAILURE
)) return -1;
236 /* Call the `morecore' hook if necessary. */
237 if (__after_morecore_hook
)
238 (*__after_morecore_hook
) ();
239 main_arena
.system_mem
= (new_brk
- mp_
.sbrk_base
) + sbrk_size
;
241 top(&main_arena
) = (mchunkptr
)(brk
+ front_misalign
);
242 set_head(top(&main_arena
), (sbrk_size
- front_misalign
) | PREV_INUSE
);
249 malloc_check(size_t sz
, const Void_t
*caller
)
251 malloc_check(sz
, caller
) size_t sz
; const Void_t
*caller
;
256 (void)mutex_lock(&main_arena
.mutex
);
257 victim
= (top_check() >= 0) ? _int_malloc(&main_arena
, sz
+1) : NULL
;
258 (void)mutex_unlock(&main_arena
.mutex
);
259 return mem2mem_check(victim
, sz
);
264 free_check(Void_t
* mem
, const Void_t
*caller
)
266 free_check(mem
, caller
) Void_t
* mem
; const Void_t
*caller
;
272 (void)mutex_lock(&main_arena
.mutex
);
273 p
= mem2chunk_check(mem
);
275 (void)mutex_unlock(&main_arena
.mutex
);
277 malloc_printerr(check_action
, "free(): invalid pointer", mem
);
281 if (chunk_is_mmapped(p
)) {
282 (void)mutex_unlock(&main_arena
.mutex
);
287 #if 0 /* Erase freed memory. */
288 memset(mem
, 0, chunksize(p
) - (SIZE_SZ
+1));
290 _int_free(&main_arena
, mem
);
291 (void)mutex_unlock(&main_arena
.mutex
);
296 realloc_check(Void_t
* oldmem
, size_t bytes
, const Void_t
*caller
)
298 realloc_check(oldmem
, bytes
, caller
)
299 Void_t
* oldmem
; size_t bytes
; const Void_t
*caller
;
303 INTERNAL_SIZE_T nb
, oldsize
;
306 if (oldmem
== 0) return malloc_check(bytes
, NULL
);
307 (void)mutex_lock(&main_arena
.mutex
);
308 oldp
= mem2chunk_check(oldmem
);
309 (void)mutex_unlock(&main_arena
.mutex
);
311 malloc_printerr(check_action
, "realloc(): invalid pointer", oldmem
);
312 return malloc_check(bytes
, NULL
);
314 oldsize
= chunksize(oldp
);
316 checked_request2size(bytes
+1, nb
);
317 (void)mutex_lock(&main_arena
.mutex
);
320 if (chunk_is_mmapped(oldp
)) {
322 mchunkptr newp
= mremap_chunk(oldp
, nb
);
324 newmem
= chunk2mem(newp
);
328 /* Note the extra SIZE_SZ overhead. */
329 if(oldsize
- SIZE_SZ
>= nb
)
330 newmem
= oldmem
; /* do nothing */
332 /* Must alloc, copy, free. */
333 if (top_check() >= 0)
334 newmem
= _int_malloc(&main_arena
, bytes
+1);
336 MALLOC_COPY(BOUNDED_N(newmem
, bytes
+1), oldmem
, oldsize
- 2*SIZE_SZ
);
342 #endif /* HAVE_MMAP */
343 if (top_check() >= 0)
344 newmem
= _int_realloc(&main_arena
, oldmem
, bytes
+1);
345 #if 0 /* Erase freed memory. */
347 newp
= mem2chunk(newmem
);
348 nb
= chunksize(newp
);
349 if(oldp
<newp
|| oldp
>=chunk_at_offset(newp
, nb
)) {
350 memset((char*)oldmem
+ 2*sizeof(mbinptr
), 0,
351 oldsize
- (2*sizeof(mbinptr
)+2*SIZE_SZ
+1));
352 } else if(nb
> oldsize
+SIZE_SZ
) {
353 memset((char*)BOUNDED_N(chunk2mem(newp
), bytes
) + oldsize
,
354 0, nb
- (oldsize
+SIZE_SZ
));
360 (void)mutex_unlock(&main_arena
.mutex
);
362 return mem2mem_check(newmem
, bytes
);
/* Checking variant of memalign: small alignments degrade to
   malloc_check; otherwise verify the top chunk and allocate with one
   extra byte for the trailing magic.  `caller' is unused.  */
static Void_t*
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
390 # if USE___THREAD || (defined USE_TLS && !defined SHARED)
391 /* These routines are never needed in this configuration. */
400 /* The following hooks are used when the global initialization in
401 ptmalloc_init() hasn't completed yet. */
405 malloc_starter(size_t sz
, const Void_t
*caller
)
407 malloc_starter(sz
, caller
) size_t sz
; const Void_t
*caller
;
412 victim
= _int_malloc(&main_arena
, sz
);
414 return victim
? BOUNDED_N(victim
, sz
) : 0;
419 memalign_starter(size_t align
, size_t sz
, const Void_t
*caller
)
421 memalign_starter(align
, sz
, caller
) size_t align
, sz
; const Void_t
*caller
;
426 victim
= _int_memalign(&main_arena
, align
, sz
);
428 return victim
? BOUNDED_N(victim
, sz
) : 0;
433 free_starter(Void_t
* mem
, const Void_t
*caller
)
435 free_starter(mem
, caller
) Void_t
* mem
; const Void_t
*caller
;
443 if (chunk_is_mmapped(p
)) {
448 _int_free(&main_arena
, mem
);
451 # endif /* !defined NO_STARTER */
452 #endif /* NO_THREADS */
455 /* Get/set state: malloc_get_state() records the current state of all
456 malloc variables (_except_ for the actual heap contents and `hook'
457 function pointers) in a system dependent, opaque data structure.
458 This data structure is dynamically allocated and can be free()d
459 after use. malloc_set_state() restores the state of all malloc
460 variables to the previously obtained state. This is especially
461 useful when using this malloc as part of a shared library, and when
462 the heap contents are saved/restored via some other method. The
463 primary example for this is GNU Emacs with its `dumping' procedure.
464 `Hook' function pointers are never saved or restored by these
465 functions, with two exceptions: If malloc checking was in use when
466 malloc_get_state() was called, then malloc_set_state() calls
467 __malloc_check_init() if possible; if malloc checking was not in
468 use in the recorded state but the user requested malloc checking,
469 then the hooks are reset to 0. */
/* Identification magic and format version stamped into every saved
   malloc state by malloc_get_state and verified by malloc_set_state.
   Version is encoded as major*0x100 + minor; set_state must reject a
   higher major version.  */
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
474 struct malloc_save_state
{
477 mbinptr av
[NBINS
* 2 + 2];
479 int sbrked_mem_bytes
;
480 unsigned long trim_threshold
;
481 unsigned long top_pad
;
482 unsigned int n_mmaps_max
;
483 unsigned long mmap_threshold
;
485 unsigned long max_sbrked_mem
;
486 unsigned long max_total_mem
;
487 unsigned int n_mmaps
;
488 unsigned int max_n_mmaps
;
489 unsigned long mmapped_mem
;
490 unsigned long max_mmapped_mem
;
491 int using_malloc_checking
;
495 public_gET_STATe(void)
497 struct malloc_save_state
* ms
;
501 ms
= (struct malloc_save_state
*)public_mALLOc(sizeof(*ms
));
504 (void)mutex_lock(&main_arena
.mutex
);
505 malloc_consolidate(&main_arena
);
506 ms
->magic
= MALLOC_STATE_MAGIC
;
507 ms
->version
= MALLOC_STATE_VERSION
;
509 ms
->av
[1] = 0; /* used to be binblocks, now no longer used */
510 ms
->av
[2] = top(&main_arena
);
511 ms
->av
[3] = 0; /* used to be undefined */
512 for(i
=1; i
<NBINS
; i
++) {
513 b
= bin_at(&main_arena
, i
);
515 ms
->av
[2*i
+2] = ms
->av
[2*i
+3] = 0; /* empty bin */
517 ms
->av
[2*i
+2] = first(b
);
518 ms
->av
[2*i
+3] = last(b
);
521 ms
->sbrk_base
= mp_
.sbrk_base
;
522 ms
->sbrked_mem_bytes
= main_arena
.system_mem
;
523 ms
->trim_threshold
= mp_
.trim_threshold
;
524 ms
->top_pad
= mp_
.top_pad
;
525 ms
->n_mmaps_max
= mp_
.n_mmaps_max
;
526 ms
->mmap_threshold
= mp_
.mmap_threshold
;
527 ms
->check_action
= check_action
;
528 ms
->max_sbrked_mem
= main_arena
.max_system_mem
;
530 ms
->max_total_mem
= mp_
.max_total_mem
;
532 ms
->max_total_mem
= 0;
534 ms
->n_mmaps
= mp_
.n_mmaps
;
535 ms
->max_n_mmaps
= mp_
.max_n_mmaps
;
536 ms
->mmapped_mem
= mp_
.mmapped_mem
;
537 ms
->max_mmapped_mem
= mp_
.max_mmapped_mem
;
538 ms
->using_malloc_checking
= using_malloc_checking
;
539 (void)mutex_unlock(&main_arena
.mutex
);
544 public_sET_STATe(Void_t
* msptr
)
546 struct malloc_save_state
* ms
= (struct malloc_save_state
*)msptr
;
550 disallow_malloc_check
= 1;
552 if(ms
->magic
!= MALLOC_STATE_MAGIC
) return -1;
553 /* Must fail if the major version is too high. */
554 if((ms
->version
& ~0xffl
) > (MALLOC_STATE_VERSION
& ~0xffl
)) return -2;
555 (void)mutex_lock(&main_arena
.mutex
);
556 /* There are no fastchunks. */
557 clear_fastchunks(&main_arena
);
558 set_max_fast(&main_arena
, DEFAULT_MXFAST
);
559 for (i
=0; i
<NFASTBINS
; ++i
)
560 main_arena
.fastbins
[i
] = 0;
561 for (i
=0; i
<BINMAPSIZE
; ++i
)
562 main_arena
.binmap
[i
] = 0;
563 top(&main_arena
) = ms
->av
[2];
564 main_arena
.last_remainder
= 0;
565 for(i
=1; i
<NBINS
; i
++) {
566 b
= bin_at(&main_arena
, i
);
567 if(ms
->av
[2*i
+2] == 0) {
568 assert(ms
->av
[2*i
+3] == 0);
569 first(b
) = last(b
) = b
;
571 if(i
<NSMALLBINS
|| (largebin_index(chunksize(ms
->av
[2*i
+2]))==i
&&
572 largebin_index(chunksize(ms
->av
[2*i
+3]))==i
)) {
573 first(b
) = ms
->av
[2*i
+2];
574 last(b
) = ms
->av
[2*i
+3];
575 /* Make sure the links to the bins within the heap are correct. */
578 /* Set bit in binblocks. */
579 mark_bin(&main_arena
, i
);
581 /* Oops, index computation from chunksize must have changed.
582 Link the whole list into unsorted_chunks. */
583 first(b
) = last(b
) = b
;
584 b
= unsorted_chunks(&main_arena
);
585 ms
->av
[2*i
+2]->bk
= b
;
586 ms
->av
[2*i
+3]->fd
= b
->fd
;
587 b
->fd
->bk
= ms
->av
[2*i
+3];
588 b
->fd
= ms
->av
[2*i
+2];
592 mp_
.sbrk_base
= ms
->sbrk_base
;
593 main_arena
.system_mem
= ms
->sbrked_mem_bytes
;
594 mp_
.trim_threshold
= ms
->trim_threshold
;
595 mp_
.top_pad
= ms
->top_pad
;
596 mp_
.n_mmaps_max
= ms
->n_mmaps_max
;
597 mp_
.mmap_threshold
= ms
->mmap_threshold
;
598 check_action
= ms
->check_action
;
599 main_arena
.max_system_mem
= ms
->max_sbrked_mem
;
601 mp_
.max_total_mem
= ms
->max_total_mem
;
603 mp_
.n_mmaps
= ms
->n_mmaps
;
604 mp_
.max_n_mmaps
= ms
->max_n_mmaps
;
605 mp_
.mmapped_mem
= ms
->mmapped_mem
;
606 mp_
.max_mmapped_mem
= ms
->max_mmapped_mem
;
607 /* add version-dependent code here */
608 if (ms
->version
>= 1) {
609 /* Check whether it is safe to enable malloc checking, or whether
610 it is necessary to disable it. */
611 if (ms
->using_malloc_checking
&& !using_malloc_checking
&&
612 !disallow_malloc_check
)
613 __malloc_check_init ();
614 else if (!ms
->using_malloc_checking
&& using_malloc_checking
) {
619 using_malloc_checking
= 0;
622 check_malloc_state(&main_arena
);
624 (void)mutex_unlock(&main_arena
.mutex
);