/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  */

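/* For reference: the action is normally selected at startup from the
   MALLOC_CHECK_ environment variable, which also causes the checking
   hooks below to be installed; e.g. (shell usage sketch)

     MALLOC_CHECK_=2 ./a.out

   should abort() on the first inconsistency detected.  */
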
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

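/* A sketch of the caller side (not code from this file): the public
   entry points consult the hook before doing any work, so __libc_malloc
   does, in essence,

     void *(*hook) (size_t, const void *) = atomic_forced_read (__malloc_hook);
     if (__builtin_expect (hook != NULL, 0))
       return (*hook) (bytes, RETURN_ADDRESS (0));

   which is why the *_hook_ini functions below run exactly once: they
   clear the hook, initialize, and fall through to the normal path.  */
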
static void*
malloc_hook_ini(size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return __libc_malloc(sz);
}

static void*
realloc_hook_ini(void* ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return __libc_realloc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return __libc_memalign(alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks.  */
void
__malloc_check_init (void)
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}

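/* This is reached from ptmalloc_init when MALLOC_CHECK_ is set in the
   environment, and possibly from __malloc_set_state below when the
   saved state was recorded with checking enabled.  */
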
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )

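/* Example: for a chunk at p == 0x602010, the low byte of (p >> 3) is
   0x02 and the low byte of (p >> 11) is 0x04, so MAGICBYTE(p) == 0x06.
   XORing two shifted copies of the address makes the magic byte depend
   on where the chunk lives, so a stale or relocated pointer is unlikely
   to carry a matching byte.  */
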
/* Visualize the chunk as being partitioned into blocks of 256 bytes from the
   highest address of the chunk, downwards.  The beginning of each block tells
   us the size of the previous block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */

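/* Worked example (illustrative; assumes SIZE_SZ == 8): for a request of
   sz == 10 bytes carved from a non-mmapped chunk of 528 bytes,
   mem2mem_check (below) stores, relative to the user pointer m_ptr:

     m_ptr[519] = 0xFF;            519 - 10 >= 0x100: full 255-byte hop
     m_ptr[264] = 0xFE;            264 - 10 == 254 < 0x100: last hop
     m_ptr[10]  = MAGICBYTE(p);

   The loop below starts at the top, hops down 255 and then 254 bytes,
   and must land exactly on the magic byte at index sz; landing anywhere
   else means the trailer was overwritten.  */
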
static size_t
malloc_check_get_size(mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = MAGICBYTE(p);

  assert(using_malloc_checking == 1);

  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
       (c = ((unsigned char*)p)[size]) != magic;
       size -= c) {
    if(c<=0 || size<(c+2*SIZE_SZ)) {
      malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
                      chunk2mem(p));
      return 0;
    }
  }

  /* chunk2mem size. */
  return size - 2*SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static void*
internal_function
mem2mem_check(void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char* m_ptr = ptr;
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (void*)m_ptr;
}

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  /* Invert the magic byte, so that a second free() of the same chunk no
     longer finds a matching byte and is rejected as invalid.  */
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

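/* Every *_check wrapper below runs top_check before allocating from the
   main arena; a -1 result (recovery failed, errno set to ENOMEM) makes
   the wrapper return NULL instead of touching the corrupt heap.  */
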
static void*
malloc_check(size_t sz, const void *caller)
{
  void *victim;

  /* One extra byte is requested for the magic byte; reject
     sz == (size_t) -1, where that +1 would wrap around to zero.  */
  if (sz+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
free_check(void* mem, const void *caller)
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
  _int_free(&main_arena, p, 1);
  (void)mutex_unlock(&main_arena.mutex);
}

static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
{
  void* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2(alignment)) {
    size_t a = MALLOC_ALIGNMENT * 2;
    while (a < alignment) a <<= 1;
    alignment = a;
  }

  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */

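/* Caller-side usage sketch (illustrative only; save_state and
   load_state are hypothetical helpers that persist the opaque blob
   alongside the heap contents):

     void *state = malloc_get_state ();   -- snapshot malloc variables
     save_state (state);
     free (state);

   and, after the heap contents have been restored by other means:

     void *state = load_state ();
     if (malloc_set_state (state) < 0)    -- -1: bad magic, -2: version
       handle_error ();
     free (state);
*/
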
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

void*
__malloc_get_state(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
  ms->max_total_mem = 0;
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  ms->max_fast = get_max_fast();
  ms->arena_test = mp_.arena_test;
  ms->arena_max = mp_.arena_max;
  ms->narenas = narenas;
  (void)mutex_unlock(&main_arena.mutex);
  return (void*)ms;
}

int
__malloc_set_state(void* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  if (ms->version >= 4)
    set_max_fast(ms->max_fast);
  else
    set_max_fast(64); /* 64 used to be the value we always used.  */
  for (i=0; i<NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      /* Disable malloc checking.  */
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  if (ms->version >= 4) {
    mp_.arena_test = ms->arena_test;
    mp_.arena_max = ms->arena_max;
    narenas = ms->narenas;
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */