malloc/hooks.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
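/* Illustrative note (not in the original source): the check action is
   normally selected via the MALLOC_CHECK_ environment variable, which
   ptmalloc_init() consults before installing the checking hooks below.
   For example:

       MALLOC_CHECK_=1 ./app    # report corrupt pointers
       MALLOC_CHECK_=2 ./app    # abort() on corrupt pointers
*/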
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
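
/* A worked example (not part of the original file) of the macro above.
   The byte depends on the chunk's address, so a guard byte copied to a
   chunk at a different address will almost never match.  The address
   below is hypothetical: */
#if 0
  mchunkptr p = (mchunkptr) 0x7f1234567890;
  /* (0x7f1234567890 >> 3)  & 0xFF == 0x12
     (0x7f1234567890 >> 11) & 0xFF == 0xcf
     0x12 ^ 0xcf == 0xdd, so MAGICBYTE(p) == 0xdd.  */
  assert (MAGICBYTE (p) == 0xdd);
#endif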
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
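
/* Sketch (not in the original source) of the guard layout produced
   above, where `end' is the last instrumented offset
   (chunksize(p) - SIZE_SZ - 1 for a non-mmapped chunk):

     m_ptr[sz]      MAGICBYTE(p)
     m_ptr[sz+d]    d     (final hop byte, d < 0x100)
     ...            0xFF  (one hop byte every 0xFF positions)
     m_ptr[end]     0xFF, or d if end is already within 0x100 of sz

   The hop bytes form a skip chain: mem2chunk_check() below starts at
   m_ptr[end] and repeatedly steps back by the byte's value until it
   lands on the magic byte, or concludes the chunk is corrupt. */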
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem, unsigned char **magic_p)
#else
mem2chunk_check(mem, magic_p) Void_t* mem; unsigned char **magic_p;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      MALLOC_FAILURE_ACTION;
      return -1;
    }
  /* Call the `morecore' hook if necessary. */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  if (sz+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, p);
  (void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, nb);
    }
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    MALLOC_FAILURE_ACTION;
    return NULL;
  }
  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS

# ifdef _LIBC
#  if USE___THREAD || !defined SHARED
    /* These routines are never needed in this configuration.  */
#   define NO_STARTER
#  endif
# endif

# ifdef NO_STARTER
#  undef NO_STARTER
# else
/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}
static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, p);
}

# endif /* !defined NO_STARTER */
#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
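
#if 0 /* Illustrative usage sketch, not part of the original file: save
         the allocator state and restore it later, in the spirit of the
         Emacs dumping scenario above.  save_heap_image() is a
         hypothetical helper; error handling is mostly omitted. */
  void *state = malloc_get_state ();       /* opaque, free()able snapshot */
  save_heap_image (state);                 /* caller's own save mechanism */
  /* ... in the restarted process, after the heap contents have been
     mapped back at their original addresses ... */
  if (malloc_set_state (state) != 0)       /* -1: bad magic, -2: version */
    abort ();
  free (state);
#endif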
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 3l) /* major*0x100 + minor */
struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */