/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}
static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}
static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}
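
/* Note: the public entry points (__libc_malloc and friends) read the
   corresponding hook variable first and, when it is non-NULL, call the
   hook instead of doing the allocation themselves.  The *_hook_ini
   functions above are the initial hook values: each one clears its hook
   so it runs only once, initializes malloc via ptmalloc_init, and then
   forwards the request to the regular entry point.  */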
/* Whether we are using malloc checking.  */
static int using_malloc_checking;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
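
/* __malloc_check_init is typically reached from ptmalloc_init when malloc
   checking has been requested (for example via the MALLOC_CHECK_
   environment variable), so the checking hooks above replace the regular
   entry points for the whole process.  */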
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */
static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
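
/* Example (hypothetical chunk address, for illustration only): for a chunk
   at p = 0x602010, (p >> 3) ends in 0x02 and (p >> 11) ends in 0x04, so the
   magic byte is 0x02 ^ 0x04 = 0x06.  Deriving the byte from the chunk
   address means a block that is freed or resized through the wrong pointer
   will not carry the magic value expected at that address.  */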
/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}
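
/* Worked example (hypothetical numbers, 64-bit build with SIZE_SZ == 8):
   a malloc_check request of 100 bytes asks _int_malloc for 101 bytes and
   may receive a chunk with a usable span of 104 bytes.  mem2mem_check
   below then stores the length byte 3 at user offset 103 (the last usable
   byte, 3 bytes past the requested end) and the magic byte at offset 100.
   The walk above starts at the last usable byte, reads 3, steps down 3
   bytes, finds the magic byte at offset 100, and so recovers the requested
   size.  Spans of more than 255 bytes beyond the requested end are covered
   by a chain of such length bytes, each at most 0xff.  */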
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz. */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = (unsigned char) block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 &&
           offset != 0x1000 && offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}
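
/* Note that XORing the magic byte above has a useful side effect: once a
   pointer has been handed to free_check or realloc_check, its magic byte
   no longer matches, so a second free or realloc of the same pointer fails
   the walk above and is reported as an invalid pointer.  realloc_check
   restores the byte through *magic_p when the old block remains in use.  */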
/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}
static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;
  size_t nb;

  if (__builtin_add_overflow (sz, 1, &nb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, nb);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}
static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}
static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;
  size_t rb;

  if (__builtin_add_overflow (bytes, 1, &rb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  if (!checked_request2size (rb, &nb))
    goto invert;

  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
            top_check ();
            newmem = _int_malloc (&main_arena, rb);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }
  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns about magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
 invert:
  if (newmem == NULL)
    *magic_p ^= 0xFF;

  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }
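
  /* For example, a requested alignment of 48 is not a power of two and is
     rounded up by the loop above to 64, the next power of two that is at
     least MALLOC_ALIGNMENT * 2 (32 on a typical 64-bit configuration).  */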
  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
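
/* The layout above mirrors the state blob written out by the historical
   malloc_get_state, so the field order and types must stay exactly as
   they were when the old Emacs binaries were dumped.  */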
/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;
  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */
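
  /* (In other words, free on a pointer inside [dumped_main_arena_start,
     dumped_main_arena_end) simply leaves the memory in place, and realloc
     of such a pointer allocates a fresh block and copies the old contents,
     because the dumped pages do not belong to any live arena.)  */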
  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
#endif /* SHLIB_COMPAT */