/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}
static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}
static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;
/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
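
/* Usage note, illustrative: the checking hooks below are normally
   activated by running a program with the MALLOC_CHECK_ environment
   variable set, e.g.

       $ MALLOC_CHECK_=3 ./a.out

   where, per the comment at the top of this file, bit 0 selects a
   diagnostic message and bit 1 a call to abort().  */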
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init (void)
{
  if (disallow_malloc_check)
    {
      disallow_malloc_check = 0;
      return;
    }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
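
/* Worked example, with an assumed address: for p == (void *) 0x555555559010,
   (uintptr_t) p >> 3 ends in byte 0x02 and (uintptr_t) p >> 11 ends in byte
   0xb2, so magic == (0x02 ^ 0xb2) == 0xb0.  Since 0xb0 != 1 it is used
   as-is; an address whose XOR came out to exactly 1 would be bumped to 2,
   so that decrementing a colliding length byte in mem2mem_check can never
   produce zero.  */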
/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}
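
/* Worked trace, with assumed numbers: on a 64-bit build (SIZE_SZ == 8),
   take a non-mmapped chunk with chunksize 544, a requested size of 10 and
   magic 0xb0.  The loop starts at chunk offset 544 - 1 + 8 == 551 and
   follows the length bytes written by mem2mem_check below:

     offset 551: 0xff  -> step down 255 to offset 296
     offset 296: 0xff  -> step down 255 to offset  41
     offset  41: 0x0f  -> step down  15 to offset  26
     offset  26: 0xb0  == magic, loop ends

   The function then returns 26 - 2 * SIZE_SZ == 10, the original request.
   A single overwritten length byte derails this walk and ends in
   malloc_printerr.  */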
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz. */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = (unsigned char) block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
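
/* Resulting layout, same assumed numbers as the trace above (req_sz == 10,
   max_sz == 536, 64-bit build), in user-pointer offsets:

     m_ptr[0..9]    user data (the 10 requested bytes)
     m_ptr[10]      magic byte (0xb0 here)
     m_ptr[25]      0x0f  -- length byte closing a 15-byte block
     m_ptr[280]     0xff  -- length byte closing a 255-byte block
     m_ptr[535]     0xff  -- length byte closing a 255-byte block

   Only the last byte of each block is written; the bytes in between are
   left untouched.  The chain is built downwards from the top of the chunk,
   so it always ends exactly at m_ptr[req_sz].  */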
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 &&
           offset != 0x1000 && offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}
/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}
static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;

  if (sz + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, sz + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}
static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}
static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;

  if (bytes + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  checked_request2size (bytes + 1, nb);
  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
            top_check ();
            newmem = _int_malloc (&main_arena, bytes + 1);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      INTERNAL_SIZE_T nb;
      checked_request2size (bytes + 1, nb);
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
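
/* Illustrative sketch, not part of the original file: how a legacy dumping
   application (the comment above names GNU Emacs) used this pair.  In this
   glibc the malloc_get_state stub below always fails with ENOSYS, so only
   the restore half still does real work, on state recorded by an older
   glibc.  Guarded out so it is compiled only if enabled by hand.  */
#if 0
#include <errno.h>
#include <stdio.h>

/* Compat declarations; recent <malloc.h> no longer provides them.  */
extern void *malloc_get_state (void);
extern int malloc_set_state (void *);

static void *saved_state;

static void
save_before_dump (void)
{
  /* Records malloc's variables (not the heap contents) in an opaque,
     free()able blob; NULL with errno == ENOSYS on glibc 2.25 and later.  */
  saved_state = malloc_get_state ();
  if (saved_state == NULL)
    perror ("malloc_get_state");
}

static void
restore_after_dump (void)
{
  /* Re-attaches previously dumped state; returns a negative value on a
     bad magic number or a too-new state version.  */
  if (saved_state != NULL && malloc_set_state (saved_state) != 0)
    fprintf (stderr, "malloc_set_state failed\n");
}
#endif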
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high. */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because __malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
#endif  /* SHLIB_COMPAT */