/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void*
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return __libc_malloc(sz);
}
static void*
realloc_hook_ini(void* ptr, size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return __libc_realloc(ptr, sz);
}
static void*
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return __libc_memalign(alignment, sz);
}
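/* Illustrative sketch (added for clarity, not part of the original file):
   the same __malloc_hook mechanism used above is also available to
   applications via <malloc.h>.  The names my_malloc_hook, old_malloc_hook
   and install_my_hook are hypothetical; the snippet is kept under "#if 0"
   so it is never compiled as part of this file. */
#if 0
#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;
  /* Restore the previous hook so the recursive malloc call below is not
     intercepted again. */
  __malloc_hook = old_malloc_hook;
  result = malloc (size);
  /* The inner call may have changed the hook; save it and re-install ours. */
  old_malloc_hook = __malloc_hook;
  fprintf (stderr, "malloc (%zu) returns %p\n", size, result);
  __malloc_hook = my_malloc_hook;
  return result;
}

static void
install_my_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}
#endif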
/* Whether we are using malloc checking.  */
static int using_malloc_checking;
/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
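/* Usage note (added for clarity, not part of the original file): this
   routine is normally reached from malloc initialization when the
   MALLOC_CHECK_ environment variable is set to a non-zero value, e.g.

       $ MALLOC_CHECK_=3 ./some-program

   which installs the *_check wrappers below so that heap misuse is
   reported and/or aborts according to check_action. */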
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
/* Visualize the chunk as being partitioned into blocks of 256 bytes from the
   highest address of the chunk, downwards.  The beginning of each block tells
   us the size of the previous block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size(mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = MAGICBYTE(p);

  assert(using_malloc_checking == 1);

  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
       (c = ((unsigned char*)p)[size]) != magic;
       size -= c) {
    if(c<=0 || size<(c+2*SIZE_SZ)) {
      malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
                      chunk2mem(p));
      return 0;
    }
  }

  /* chunk2mem size.  */
  return size - 2*SIZE_SZ;
}
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static void*
internal_function
mem2mem_check(void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char* m_ptr = ptr;
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return m_ptr;
}
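/* Worked example (added for illustration; assumes a 64-bit build with
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, and an exact-fit, non-mmapped
   chunk -- the names are the ones defined in this file):

   malloc_check(100) asks _int_malloc for 101 bytes, which typically yields
   a chunk p with chunksize(p) == 112 and mem == (char*)p + 16.
   mem2mem_check(mem, 100) starts at index i = 112 - (SIZE_SZ+1) = 103
   (relative to mem); since 103 - 100 == 3 is below 0x100 it stores
   mem[103] = 3 and then mem[100] = MAGICBYTE(p).

   Later, malloc_check_get_size(p) scans downwards from offset
   112 - 1 + SIZE_SZ = 119 relative to p (the same byte as mem[103]),
   reads the 3, steps back to mem[100], finds the magic byte there and
   returns 116 - 2*SIZE_SZ = 100, the original request size.  When the
   slack between the end of the chunk and the requested size exceeds 255,
   the loop above writes 0xFF place-holder bytes so the downward scan can
   proceed in steps of at most 255 bytes. */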
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static void*
malloc_check(size_t sz, const void *caller)
{
  void *victim;

  if (sz+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  /* Request one extra byte: mem2mem_check stores the magic byte at mem[sz]. */
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
static void
free_check(void* mem, const void *caller)
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
  _int_free(&main_arena, p, 1);
  (void)mutex_unlock(&main_arena.mutex);
}
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
{
  void* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
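/* Illustrative usage sketch (added for clarity, not part of the original
   file): a dumping application in the Emacs style could use the pair
   roughly as below.  The functions dump_heap_state, restore_heap_state,
   save_blob and load_blob are hypothetical; only malloc_get_state and
   malloc_set_state come from <malloc.h>.  Kept under "#if 0" so it is
   never compiled here. */
#if 0
#include <malloc.h>
#include <stdlib.h>

void
dump_heap_state (void)
{
  void *state = malloc_get_state ();       /* opaque, malloc'd snapshot */
  if (state != NULL)
    {
      save_blob ("malloc-state", state);   /* hypothetical persistence helper */
      free (state);
    }
}

void
restore_heap_state (void)
{
  void *state = load_blob ("malloc-state");  /* hypothetical */
  if (state == NULL || malloc_set_state (state) != 0)
    abort ();   /* wrong magic or incompatible version */
}
#endif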
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
void*
__malloc_get_state(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
  ms->max_total_mem = 0;
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  ms->max_fast = get_max_fast();
#ifdef PER_THREAD
  ms->arena_test = mp_.arena_test;
  ms->arena_max = mp_.arena_max;
  ms->narenas = narenas;
#endif
  (void)mutex_unlock(&main_arena.mutex);
  return (void*)ms;
}
int
__malloc_set_state(void* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  if (ms->version >= 4)
    set_max_fast(ms->max_fast);
  else
    set_max_fast(64); /* 64 used to be the value we always used.  */
  for (i=0; i<NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  if (ms->version >= 4) {
#ifdef PER_THREAD
    mp_.arena_test = ms->arena_test;
    mp_.arena_max = ms->arena_max;
    narenas = ms->narenas;
#endif
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}