/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* $Id$ */
#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
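/* Illustration (not part of the original comments): check_action is
   normally set from the MALLOC_CHECK_ environment variable when
   ptmalloc_init() runs, so the two bits combine as follows:

     MALLOC_CHECK_=0   ignore corruption silently
     MALLOC_CHECK_=1   print a diagnostic on stderr      (bit 0)
     MALLOC_CHECK_=2   call abort()                      (bit 1)
     MALLOC_CHECK_=3   print a diagnostic, then abort()  (both bits)  */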
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
static int check_action = DEFAULT_CHECK_ACTION;

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    {
#ifdef _LIBC
      _IO_flockfile (stderr);
      int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
      ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
#endif
      fprintf(stderr, "malloc: using debugging hooks\n");
#ifdef _LIBC
      ((_IO_FILE *) stderr)->_flags2 = old_flags2;
      _IO_funlockfile (stderr);
#endif
    }
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
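/* Worked example (illustrative values, not from the original source):
   for a chunk at p == 0x0804a008,
     ((size_t)p >> 3)  == 0x01009401
     ((size_t)p >> 11) == 0x00010094
     xor               == 0x01019495
   so MAGICBYTE(p) == 0x95.  Because the byte is derived from the chunk
   address, a stray constant written past the end of one buffer is
   unlikely to also match the magic bytes of other chunks. */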
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
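/* Worked example (illustrative, assuming SIZE_SZ == 4 and a request of
   sz == 5 served by a non-mmapped chunk with chunksize(p) == 16, so the
   last usable byte is m_ptr[11]):

     m_ptr[0..4]    user data (5 bytes)
     m_ptr[5]       MAGICBYTE(p)
     m_ptr[6..10]   slack
     m_ptr[11]      0x06, the distance back to the magic byte (11 - 5)

   When the slack exceeds 0xFE bytes, the loop above instead lays down a
   chain of 0xFF markers, each telling mem2chunk_check() below to step
   another 0xFF positions towards the magic byte. */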
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if( (char*)p>=mp_.sbrk_base &&
      (char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)),
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)mp_.sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}
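/* Note on the `^= 0xFF' above (illustrative, not from the original
   comments): once a pointer has been validated, its magic byte is
   deliberately destroyed, so validating the same pointer a second time
   fails.  This is what turns a double free into a diagnostic:

     p = malloc(5);
     free(p);        the magic byte is flipped here
     free(p);        mem2chunk_check() returns NULL, so free_check()
                     reports "free(): invalid pointer" instead of
                     corrupting the heap  */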
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
     t == initial_top(&main_arena)) return 0;

  if(check_action & 1)
    {
#ifdef _LIBC
      _IO_flockfile (stderr);
      int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
      ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
#endif
      fprintf(stderr, "malloc: top chunk is corrupt\n");
#ifdef _LIBC
      ((_IO_FILE *) stderr)->_flags2 = old_flags2;
      _IO_funlockfile (stderr);
#endif
    }
  if(check_action & 2)
    abort();

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
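/* Note (illustrative): the request is enlarged to sz+1 so that at least
   one byte past the user area exists for the magic byte that
   mem2mem_check() writes; the caller still sees only sz usable bytes. */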
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);
    if(check_action & 1)
      {
#ifdef _LIBC
        _IO_flockfile (stderr);
        int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
        ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
#endif
        fprintf(stderr, "free(): invalid pointer %p!\n", mem);
#ifdef _LIBC
        ((_IO_FILE *) stderr)->_flags2 = old_flags2;
        _IO_funlockfile (stderr);
#endif
      }
    if(check_action & 2)
      abort();
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    if(check_action & 1)
      {
#ifdef _LIBC
        _IO_flockfile (stderr);
        int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
        ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
#endif
        fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
#ifdef _LIBC
        ((_IO_FILE *) stderr)->_flags2 = old_flags2;
        _IO_funlockfile (stderr);
#endif
      }
    if(check_action & 2)
      abort();
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
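/* Usage sketch (illustrative only; `dump_region' and `reload_region'
   are hypothetical application routines, not glibc functions).  An
   application that dumps and reloads its heap, in the style of GNU
   Emacs, might proceed roughly like this: */
#if 0
static void save_heap(void)
{
  Void_t *state = malloc_get_state();  /* opaque, malloc()ed snapshot */
  if (state) {
    dump_region(state);                /* save alongside the heap pages */
    free(state);
  }
}

static void restore_heap(void)
{
  Void_t *state = reload_region();     /* read the snapshot back in */
  if (malloc_set_state(state) < 0)     /* negative result: bad magic or
                                          unsupported major version */
    abort();
  free(state);                         /* the snapshot is ordinary memory */
}
#endif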
#define MALLOC_STATE_MAGIC   0x444c4541l /* "DLEA" in ASCII, presumably a
                                            nod to Doug Lea's malloc */
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */