[glibc.git] / malloc / hooks.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */
static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
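/* Usage sketch (not taken from this file; assumes a glibc built with
   tunables support, which is the default): checking is normally enabled
   before the first allocation by setting the MALLOC_CHECK_ environment
   variable or the glibc.malloc.check tunable, e.g.

       MALLOC_CHECK_=3 ./a.out
       GLIBC_TUNABLES=glibc.malloc.check=3 ./a.out

   which causes ptmalloc_init to install the hooks above via
   __malloc_check_init.  */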
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}

/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size. */
  return size - 2 * SIZE_SZ;
}
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz. */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
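/* Worked example (illustrative only; the exact numbers assume a common
   64-bit configuration with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16):
   malloc_check (10, NULL) asks _int_malloc for 10 + 1 bytes, so there is
   always room for the trailing magic byte; this yields a 32-byte chunk
   with 24 usable bytes.  mem2mem_check then writes magicbyte (p) at
   m_ptr[10] and a single length byte of 13 at m_ptr[23] (or 12, if 13
   happens to collide with the magic value).  malloc_check_get_size and
   mem2chunk_check later walk this chain of length bytes downwards until
   they hit the magic byte, recovering the requested size and detecting
   any overrun that clobbered it.  */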
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}
/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;
  size_t nb;

  if (__builtin_add_overflow (sz, 1, &nb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, nb);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}
static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}
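/* Minimal sketch of what these hooks catch (hypothetical user program,
   not part of this file): with checking enabled, a one-byte overrun
   clobbers the magic byte and is normally reported at free time instead
   of silently corrupting the heap:

       char *p = malloc (10);   // routed through malloc_check
       p[10] = 'x';             // overwrites the magic byte at offset 10
       free (p);                // free_check -> mem2chunk_check fails
                                //   -> malloc_printerr ("free(): invalid pointer")

   A second free of the same pointer is likely to fail the same way,
   because the first (successful) free flipped the magic byte via the
   ^= 0xFF in mem2chunk_check.  */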
static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;
  size_t rb;

  if (__builtin_add_overflow (bytes, 1, &rb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  if (!checked_request2size (rb, &nb))
    goto invert;

  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem;      /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
            top_check ();
            newmem = _int_malloc (&main_arena, rb);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
invert:
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}
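/* Example of the rounding above (assuming MALLOC_ALIGNMENT == 16 and
   MINSIZE == 32, as on typical 64-bit targets): an alignment of 48 is
   not a power of two, so the loop starts at 32 and doubles it to 64;
   an alignment of 100 is rounded up to 128 the same way.  An alignment
   of 24 never reaches the loop at all, because it is first raised to
   MINSIZE (32), which is already a power of two.  */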
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
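/* Layout note (a sketch of the rationale, inferred from the compat code
   below): this struct mirrors, field for field, the state blob that
   malloc_get_state wrote out in glibc 2.24 and earlier, which is what
   old dumped Emacs binaries embed, so its layout cannot be changed
   without breaking those dumps.  Only the fields actually consulted by
   malloc_set_state (magic, version, av, sbrk_base, sbrked_mem_bytes)
   matter for the rewrite.  */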
/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high. */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
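/* Follow-up note (a sketch of how this range is consumed; the code
   itself lives in malloc.c, not in this file): free and realloc check
   whether a chunk marked IS_MMAPPED falls inside
   [dumped_main_arena_start, dumped_main_arena_end).  Such dumped fake
   mmapped chunks are never munmap()ed or returned to a bin; realloc on
   one of them always allocates fresh memory and copies, leaving the
   dumped pages untouched for the lifetime of the process.  */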
#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */