[glibc.git] / malloc / hooks.c

/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work.  */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}
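
/* For illustration only: the public allocation entry points are expected
   to consult these hooks before doing any real work, roughly along the
   lines of

     void *(*hook) (size_t, const void *)
       = atomic_forced_read (__malloc_hook);
     if (__glibc_unlikely (hook != NULL))
       return (*hook) (bytes, RETURN_ADDRESS (0));

   (a sketch of __libc_malloc, not the exact code).  So each *_hook_ini
   function above runs at most once per hook: it clears its hook, calls
   ptmalloc_init (), and then retries the real allocation.  */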

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* Activate a standard set of debugging hooks.  */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code.  */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
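
/* Worked example (illustrative only; assumes a chunk at the arbitrary
   16-byte aligned address 0x602010):
     0x602010 >> 3  == 0xc0402
     0x602010 >> 11 == 0xc04
     0xc0402 ^ 0xc04 == 0xc0806, and 0xc0806 & 0xff == 0x06,
   so magicbyte () returns 0x06 for that chunk.  The magic byte varies
   with the chunk address, so a fixed overrun pattern will not pass the
   check for every chunk.  */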

/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}
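
/* Worked example (illustrative; assumes SIZE_SZ == 8 and a non-mmapped
   chunk): malloc_check (23, ...) requests 24 bytes from _int_malloc,
   giving a chunk of chunksize 32.  mem2mem_check () below stores the
   magic byte at user offset 23, i.e. at ((unsigned char *) p)[39].
   The loop above starts at size == 32 - 1 + 8 == 39, finds the magic
   byte immediately, and returns 39 - 2 * SIZE_SZ == 23, the original
   request.  */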

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  */
static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
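
/* Worked example of the length-byte chain (illustrative; assumes
   SIZE_SZ == 8, a non-mmapped chunk of chunksize 544, req_sz == 10 and
   magic == 0x06): max_sz == 544 - 16 + 8 == 536, so the loop writes
     m_ptr[535] = 0xff, m_ptr[280] = 0xff, m_ptr[25] = 15,
   and finally m_ptr[10] = 0x06.  malloc_check_get_size () walks this
   chain downwards from the top of the chunk until it reaches the magic
   byte, recovering the requested size of 10; an overrun that clobbers
   the chain or the magic byte breaks that walk and is reported as
   memory corruption.  */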

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */
static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory.  */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first.  */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;

  if (sz + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, sz + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;

  if (bytes + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  checked_request2size (bytes + 1, nb);
  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead.  */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem;      /* do nothing */
        else
          {
            /* Must alloc, copy, free.  */
            top_check ();
            newmem = _int_malloc (&main_arena, bytes + 1);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      INTERNAL_SIZE_T nb;
      checked_request2size (bytes + 1, nb);
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}
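
/* Worked example of the rounding above (illustrative; assumes
   MALLOC_ALIGNMENT == 16): memalign_check (40, n, ...) reaches the
   !powerof2 branch, where a starts at 32 and is doubled once to 64,
   so the request is satisfied with 64-byte alignment.  A power-of-two
   alignment such as 64 is left unchanged.  */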

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the dumped heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */