/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */
static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}
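
/* Illustrative sketch (not part of the original source): the hook
   pointers cleared above are also the application-visible
   interposition API.  A program can install its own tracing hook
   using the same self-removing pattern the *_hook_ini functions use
   (take the hook out, do the real work, put it back):

     static void *(*old_malloc_hook) (size_t, const void *);

     static void *
     my_malloc_hook (size_t size, const void *caller)
     {
       void *result;
       __malloc_hook = old_malloc_hook;   // avoid recursing into ourselves
       result = malloc (size);
       old_malloc_hook = __malloc_hook;   // the hook may have changed
       __malloc_hook = my_malloc_hook;    // re-arm the hook
       return result;
     }
*/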
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init (void)
{
  if (disallow_malloc_check)
    {
      disallow_malloc_check = 0;
      return;
    }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
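
/* Illustrative usage (not part of the original source): these hooks
   are normally activated from the environment rather than by calling
   __malloc_check_init directly, e.g.

     MALLOC_CHECK_=3 ./myprog

   ptmalloc_init reads MALLOC_CHECK_, sets check_action from its value
   (see the comment near the top of this file), and enables the hooks
   below when the value is nonzero.  */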
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ((((size_t) p >> 3) ^ ((size_t) p >> 11)) & 0xFF)
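
/* Worked example (illustrative): for a chunk at p == 0x602010,
   p >> 3 == 0xc0402 and p >> 11 == 0xc04, so
   MAGICBYTE (p) == (0xc0402 ^ 0xc04) & 0xff == 0x06.  Deriving the
   byte from the chunk address means a magic byte copied verbatim to a
   different chunk will almost never validate there.  */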
/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells us
   the size of that block, up to the actual size of the requested memory.
   The last block has a length of zero and is followed by the magic byte.
   Our magic byte is right at the end of the requested size.  If we don't
   reach it with this iteration we have witnessed a memory corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t total_sz, size;
  unsigned char c;
  unsigned char magic = MAGICBYTE (p);

  assert (using_malloc_checking == 1);

  /* Validate the length-byte chain.  */
  total_sz = chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
  for (size = total_sz - 1;
       (c = ((unsigned char *) p)[size]) != 0;
       size -= c)
    {
      if (size <= c + 2 * SIZE_SZ)
        break;
    }
  if (c != 0 || ((unsigned char *) p)[--size] != magic)
    {
      malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
                       chunk2mem (p));
      return 0;
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */
static void *
internal_function
mem2mem_check (void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t user_sz, block_sz, i;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  user_sz = chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
  user_sz -= 2 * SIZE_SZ;
  for (i = user_sz - 1; i > sz; i -= block_sz)
    {
      block_sz = i - (sz + 1);
      if (block_sz > 0xff)
        block_sz = 0xff;

      m_ptr[i] = (unsigned char) block_sz;

      if (block_sz == 0)
        break;
    }
  m_ptr[sz] = MAGICBYTE (p);
  return (void *) m_ptr;
}
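
/* Illustrative layout (not part of the original source): for a request
   of sz == 10 landing in a chunk with user_sz == 600, the loop above
   writes, from the top of the chunk downwards:

     m_ptr[599] = 0xff    (block of 255 bytes)
     m_ptr[344] = 0xff    (block of 255 bytes)
     m_ptr[89]  = 78
     m_ptr[11]  = 0       (terminating zero-length block)
     m_ptr[10]  = MAGICBYTE (p)

   malloc_check_get_size and mem2chunk_check walk this chain from the
   top; an overrun past m_ptr[10] clobbers the magic byte or one of the
   length bytes and is detected as corruption.  */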
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */
static mchunkptr
internal_function
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = MAGICBYTE (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && (p->prev_size & MALLOC_ALIGN_MASK ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != 0; sz -= c)
        {
          if (sz <= c + 2 * SIZE_SZ)
            break;
        }
      if (c != 0 || ((unsigned char *) p)[--sz] != magic)
        return NULL;
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || (p->size & PREV_INUSE) ||
          ((((unsigned long) p - p->prev_size) & page_mask) != 0) ||
          ((p->prev_size + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != 0; sz -= c)
        {
          if (sz <= c + 2 * SIZE_SZ)
            break;
        }
      if (c != 0 || ((unsigned char *) p)[--sz] != magic)
        return NULL;
    }
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}
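
/* Note (illustrative): the XOR above deliberately invalidates the magic
   byte once a pointer has been accepted for free or realloc, so a
   second free of the same pointer fails mem2chunk_check and is
   reported as "free(): invalid pointer" instead of silently corrupting
   the arena.  */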
/* Check for corruption of the top chunk, and try to recover if
   necessary. */
static int
internal_function
top_check (void)
{
  mchunkptr t = top (&main_arena);
  char *brk, *new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO (dl_pagesize);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE (0);
  front_misalign = (unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long) (brk + sbrk_size) & (pagesz - 1));
  new_brk = (char *) (MORECORE (sbrk_size));
  if (new_brk == (char *) (MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
  if (hook)
    (*hook)();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top (&main_arena) = (mchunkptr) (brk + front_misalign);
  set_head (top (&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;

  if (sz + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  (void) mutex_lock (&main_arena.mutex);
  victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
  (void) mutex_unlock (&main_arena.mutex);
  return mem2mem_check (victim, sz);
}
static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  (void) mutex_lock (&main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    {
      (void) mutex_unlock (&main_arena.mutex);

      malloc_printerr (check_action, "free(): invalid pointer", mem);
      return;
    }
  if (chunk_is_mmapped (p))
    {
      (void) mutex_unlock (&main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  (void) mutex_unlock (&main_arena.mutex);
}
static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;

  if (bytes + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  (void) mutex_lock (&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  (void) mutex_unlock (&main_arena.mutex);
  if (!oldp)
    {
      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
      return malloc_check (bytes, NULL);
    }
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  checked_request2size (bytes + 1, nb);
  (void) mutex_lock (&main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
            if (top_check () >= 0)
              newmem = _int_malloc (&main_arena, bytes + 1);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      if (top_check () >= 0)
        {
          INTERNAL_SIZE_T nb;
          checked_request2size (bytes + 1, nb);
          newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
        }
    }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;

  (void) mutex_unlock (&main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }
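
  /* Illustrative example (not in the original source): assuming
     MALLOC_ALIGNMENT == 16 (e.g. on x86-64), a request for
     alignment == 24 is rounded up here to the next power of two, 32,
     before being passed on to _int_memalign below.  */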
  (void) mutex_lock (&main_arena.mutex);
  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
        NULL;
  (void) mutex_unlock (&main_arena.mutex);
  return mem2mem_check (mem, bytes);
}

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
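
/* Caller-side sketch (illustrative only; error handling omitted, and
   it assumes the heap pages themselves are preserved by some other
   mechanism, as in the Emacs dumper):

     void *state = malloc_get_state ();  // snapshot allocator variables
     ...                                 // write heap image + state blob
     // later, in the restarted process, with the heap image mapped back:
     malloc_set_state (state);           // re-link bins and parameters
     free (state);                       // the blob lives in the restored heap
*/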

#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 4l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

void *
__malloc_get_state (void)
{
  struct malloc_save_state *ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state *) __libc_malloc (sizeof (*ms));
  if (!ms)
    return 0;

  (void) mutex_lock (&main_arena.mutex);
  malloc_consolidate (&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top (&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for (i = 1; i < NBINS; i++)
    {
      b = bin_at (&main_arena, i);
      if (first (b) == b)
        ms->av[2 * i + 2] = ms->av[2 * i + 3] = 0; /* empty bin */
      else
        {
          ms->av[2 * i + 2] = first (b);
          ms->av[2 * i + 3] = last (b);
        }
    }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
  ms->max_total_mem = 0;
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  ms->max_fast = get_max_fast ();
  ms->arena_test = mp_.arena_test;
  ms->arena_max = mp_.arena_max;
  ms->narenas = narenas;
  (void) mutex_unlock (&main_arena.mutex);
  return (void *) ms;
}

int
__malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init ();
  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high. */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  (void) mutex_lock (&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks (&main_arena);
  if (ms->version >= 4)
    set_max_fast (ms->max_fast);
  else
    set_max_fast (64); /* 64 used to be the value we always used.  */
  for (i = 0; i < NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i = 0; i < BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top (&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for (i = 1; i < NBINS; i++)
    {
      b = bin_at (&main_arena, i);
      if (ms->av[2 * i + 2] == 0)
        {
          assert (ms->av[2 * i + 3] == 0);
          first (b) = last (b) = b;
        }
      else
        {
          if (ms->version >= 3 &&
              (i < NSMALLBINS || (largebin_index (chunksize (ms->av[2 * i + 2])) == i &&
                                  largebin_index (chunksize (ms->av[2 * i + 3])) == i)))
            {
              first (b) = ms->av[2 * i + 2];
              last (b) = ms->av[2 * i + 3];
              /* Make sure the links to the bins within the heap are correct.  */
              first (b)->bk = b;
              last (b)->fd = b;
              /* Set bit in binblocks.  */
              mark_bin (&main_arena, i);
            }
          else
            {
              /* Oops, index computation from chunksize must have changed.
                 Link the whole list into unsorted_chunks.  */
              first (b) = last (b) = b;
              b = unsorted_chunks (&main_arena);
              ms->av[2 * i + 2]->bk = b;
              ms->av[2 * i + 3]->fd = b->fd;
              b->fd->bk = ms->av[2 * i + 3];
              b->fd = ms->av[2 * i + 2];
            }
        }
    }
  if (ms->version < 3)
    {
      /* Clear fd_nextsize and bk_nextsize fields.  */
      b = unsorted_chunks (&main_arena)->fd;
      while (b != unsorted_chunks (&main_arena))
        {
          if (!in_smallbin_range (chunksize (b)))
            {
              b->fd_nextsize = NULL;
              b->bk_nextsize = NULL;
            }
          b = b->fd;
        }
    }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1)
    {
      /* Check whether it is safe to enable malloc checking, or whether
         it is necessary to disable it.  */
      if (ms->using_malloc_checking && !using_malloc_checking &&
          !disallow_malloc_check)
        __malloc_check_init ();
      else if (!ms->using_malloc_checking && using_malloc_checking)
        {
          __malloc_hook = NULL;
          __free_hook = NULL;
          __realloc_hook = NULL;
          __memalign_hook = NULL;
          using_malloc_checking = 0;
        }
    }
  if (ms->version >= 4)
    {
      mp_.arena_test = ms->arena_test;
      mp_.arena_max = ms->arena_max;
      narenas = ms->narenas;
    }
  check_malloc_state (&main_arena);

  (void) mutex_unlock (&main_arena.mutex);
  return 0;
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */