/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,02 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
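
/* Usage sketch (not part of the original source): the check level is
   normally selected at program start through the MALLOC_CHECK_
   environment variable mentioned further below, e.g.

       $ MALLOC_CHECK_=2 ./myprog

   makes the hooks call abort() on the first corrupt pointer; `myprog'
   stands for any dynamically linked program. */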

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
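
/* A minimal sketch of how an application can install its own debugging
   hook once initialization has completed, in the style of the glibc
   manual (the names my_malloc_hook and old_malloc_hook are hypothetical,
   not part of this file):

     static Void_t* (*old_malloc_hook)(size_t, const __malloc_ptr_t);

     static Void_t*
     my_malloc_hook(size_t sz, const __malloc_ptr_t caller)
     {
       Void_t* result;
       __malloc_hook = old_malloc_hook;   restore, to avoid recursion
       result = malloc(sz);
       old_malloc_hook = __malloc_hook;   malloc may have changed it
       __malloc_hook = my_malloc_hook;    re-install this hook
       return result;
     }

   with old_malloc_hook initialized from __malloc_hook, and
   __malloc_hook set to my_malloc_hook, once at startup. */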

static int check_action = DEFAULT_CHECK_ACTION;

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
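
/* Worked example (illustration only) for a non-mmapped chunk: if the
   last usable byte of a chunk sits at user offset 10 and the request
   was for sz = 3 bytes, mem2mem_check() below stores

       m_ptr[10] = 10 - 3 = 7        distance back towards sz
       m_ptr[3]  = MAGICBYTE(p)      magic byte just past the request

   mem2chunk_check() then starts at the last usable byte and follows
   the stored distances (0xFF markers bridge gaps of 255 bytes or more)
   until it reads the magic byte; a write past sz clobbers one of these
   bytes and the walk fails.  A successful check XORs the magic byte
   with 0xFF, so a second free() of the same pointer is caught too. */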

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  p = mem2chunk(mem);
  if(!aligned_OK(p)) return NULL;
  if( (char*)p>=mp_.sbrk_base &&
      (char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)),
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)mp_.sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
     t == initial_top(&main_arena)) return 0;

  if(check_action & 1)
    fprintf(stderr, "malloc: top chunk is corrupt\n");
  if(check_action & 2)
    abort();

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);
    if(check_action & 1)
      fprintf(stderr, "free(): invalid pointer %p!\n", mem);
    if(check_action & 2)
      abort();
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}

static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    if(check_action & 1)
      fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
    if(check_action & 2)
      abort();
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem) {
      newp = mem2chunk(newmem);
      nb = chunksize(newp);
      if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
        memset((char*)oldmem + 2*sizeof(mbinptr), 0,
               oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
      } else if(nb > oldsize+SIZE_SZ) {
        memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
               0, nb - (oldsize+SIZE_SZ));
      }
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}

#ifndef NO_THREADS

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

#endif /* NO_THREADS */

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
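
/* Usage sketch (illustration only, error handling omitted).  In the
   process that saves its heap:

     Void_t* state = malloc_get_state();
     ... write the heap image and `state' out by some external means,
         e.g. the Emacs dumper ...

   and in the restarted process, once the heap image is mapped back:

     malloc_set_state(state);
     free(state);
*/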

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
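
/* E.g. the current version 0x002 encodes major version 0, minor
   version 2 (a note for illustration).  Only the major number matters
   for compatibility: malloc_set_state() below masks off the low byte
   and rejects a state whose major version exceeds the compiled-in
   one, since minor revisions remain compatible. */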

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};

Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      /* Turn off the checking hooks again.  */
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}