/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps. */
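/* Illustrative sketch (not part of the allocator): because HEAP_MAX_SIZE is
   a power of two and every heap starts at a HEAP_MAX_SIZE-aligned address,
   the heap containing a chunk is found by masking off the low bits of the
   chunk address.  With the default 1MB value, for example:

     chunk at 0x40235678
     0x40235678 & ~(0x100000 - 1) == 0x40200000   -> start of its heap_info

   This is exactly the computation performed by the heap_for_ptr() macro
   defined below. */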
#define THREAD_STATS 0

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */

/***************************************************************************/
#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */

typedef struct _heap_info {
  mstate ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly aligned. */
} heap_info;
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/
#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
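/* Usage sketch (hypothetical caller, not part of this file): a public
   allocation routine such as public_mALLOc(), defined elsewhere, is
   expected to use these macros roughly as follows.

     Void_t *p;
     mstate ar_ptr;

     arena_get(ar_ptr, sz);              // pick and lock an arena
     if(!ar_ptr)
       return 0;
     p = _int_malloc(ar_ptr, sz);        // allocate from that arena
     (void)mutex_unlock(&ar_ptr->mutex);
     return p;

   free() goes the other way: arena_for_chunk(mem2chunk(p)) recovers the
   owning arena from the chunk address alone, with no thread-specific
   lookup needed. */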
#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */
/**************************************************************************/
#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
                                                       __const __malloc_ptr_t));
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) __MALLOC_P ((size_t __align,
                                                         size_t __size,
                                                         __const __malloc_ptr_t));
# endif
static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                           __const __malloc_ptr_t));
static Void_t* save_arena;
/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)
/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all. */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check() < 0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                         /* chunk corresponding to mem */

  if (mem == 0)                        /* free(0) has no effect */
    return;

  p = mem2chunk(mem);        /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p)) {           /* release mmapped memory. */
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}
static void
ptmalloc_unlock_all __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In LinuxThreads, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */
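/* Illustrative sketch (not part of this file): the three handlers above are
   meant to be registered with the thread library's atfork mechanism, e.g.

     thread_atfork(ptmalloc_lock_all,      // before fork(): lock every arena
                   ptmalloc_unlock_all,    // in the parent: unlock again
                   ptmalloc_unlock_all2);  // in the child: re-init the mutexes

   so that no arena mutex can be left held by a thread that does not exist in
   the child.  ptmalloc_init() below performs exactly this registration. */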
/* Initialization routine. */

extern char **_environ;
static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
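/* Example (assumed environment, for illustration only): with

     _environ = { "HOME=/root", "MALLOC_TRIM_THRESHOLD_=131072", NULL }

   the first call to next_env_entry() skips "HOME=/root", returns a pointer
   to "TRIM_THRESHOLD_=131072" (the text after the "MALLOC_" prefix), and
   advances *position past that entry for the next call. */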
/* Set up basic state so that _int_malloc et al can work.  */

static void
ptmalloc_init_minimal __MALLOC_P((void))
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad        = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize       = malloc_getpagesize;
}
# if defined SHARED && defined USE_TLS && !USE___THREAD
# include <stdbool.h>

/* This is called by __pthread_initialize_minimal when it needs to use
   malloc to set up the TLS state.  We cannot do the full work of
   ptmalloc_init (below) until __pthread_initialize_minimal has finished,
   so it has to switch to using the special startup-time hooks while doing
   those allocations.  */
void
__libc_malloc_pthread_startup (bool first_time)
{
  if (first_time)
    {
      ptmalloc_init_minimal ();
      save_malloc_hook = __malloc_hook;
      save_memalign_hook = __memalign_hook;
      save_free_hook = __free_hook;
      __malloc_hook = malloc_starter;
      __memalign_hook = memalign_starter;
      __free_hook = free_starter;
    }
  else
    {
      __malloc_hook = save_malloc_hook;
      __memalign_hook = save_memalign_hook;
      __free_hook = save_free_hook;
    }
}
# endif
static void
ptmalloc_init __MALLOC_P((void))
{
  const char* s = NULL;
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

# if defined SHARED && defined USE_TLS && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
    ptmalloc_init_minimal();
#ifndef NO_THREADS
# if defined _LIBC && defined USE_TLS
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
#  define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
#ifdef _LIBC
  secure = __libc_enable_secure;

  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure && memcmp (envline, "TOP_PAD_", 8) == 0)
                mALLOPt(M_TOP_PAD, atoi(&envline[9]));
              break;
            case 9:
              if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              break;
            case 15:
              if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
              else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
              break;
            default:
              break;
            }
        }
    }
#else
  if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
    mALLOPt(M_TRIM_THRESHOLD, atoi(s));
  if((s = getenv("MALLOC_TOP_PAD_")))
    mALLOPt(M_TOP_PAD, atoi(s));
  if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
    mALLOPt(M_MMAP_THRESHOLD, atoi(s));
  if((s = getenv("MALLOC_MMAP_MAX_")))
    mALLOPt(M_MMAP_MAX, atoi(s));
  s = getenv("MALLOC_CHECK_");
#endif
  if(s) {
    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
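/* Usage example (shell, illustrative only): the tunables parsed above can be
   set in the environment before a program starts, e.g.

     MALLOC_TRIM_THRESHOLD_=65536 MALLOC_MMAP_MAX_=0 MALLOC_CHECK_=1 ./app

   MALLOC_TRIM_THRESHOLD_ and MALLOC_MMAP_MAX_ feed mALLOPt() as shown above,
   and a non-empty MALLOC_CHECK_ selects the check action and enables the
   checking hooks via __malloc_check_init(). */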
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */
#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;
  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
  if(p1 != MAP_FAILED) {
    p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
    ul = p2 - p1;
    munmap(p1, ul);
    munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
  } else {
    /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
       is already aligned. */
    p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
    if(p2 == MAP_FAILED)
      return 0;
    if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
      munmap(p2, HEAP_MAX_SIZE);
      return 0;
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
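/* Worked example (illustrative only, with HEAP_MAX_SIZE = 1MB): suppose the
   first MMAP of 2MB returns p1 = 0x40155000.  Then

     p2 = (0x40155000 + 0xfffff) & ~0xfffff = 0x40200000   (aligned start)
     ul = p2 - p1 = 0xab000                                 (leading excess)

   and the two munmap() calls return the 0xab000 bytes before p2 and the
   (1MB - 0xab000) bytes after p2 + 1MB, leaving exactly one aligned,
   HEAP_MAX_SIZE-sized reservation. */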
/* Grow or shrink a heap.  size is automatically rounded up to a
   multiple of the page size if it is positive. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  if(diff >= 0) {
    diff = (diff + page_mask) & ~page_mask;
    new_size = (long)h->size + diff;
    if(new_size > HEAP_MAX_SIZE)
      return -1;
    if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
      return -2;
  } else {
    new_size = (long)h->size + diff;
    if(new_size < (long)sizeof(*h))
      return -1;
    /* Try to re-map the extra heap space freshly to save memory, and
       make it inaccessible. */
    if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
                    MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
      return -2;
    /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
  }
  h->size = new_size;
  return 0;
}
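/* Worked example (illustrative only, 4096-byte pages): growing a heap whose
   current size is 0x21000 by diff = 5000 first rounds diff up to
   (5000 + 4095) & ~4095 = 8192, then mprotect()s the two pages starting at
   (char *)h + 0x21000 read/write, giving a new size of 0x23000.  A negative
   diff instead re-maps the tail as PROT_NONE so those pages can be
   reclaimed by the kernel. */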
#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
static int
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(grow_heap(heap, -extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
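/* Worked example (illustrative only, pagesz = 4096, MINSIZE = 16, pad = 0):
   with a top chunk of top_size = 0x9350 bytes,

     extra = ((0x9350 - 0 - 16 + 4095)/4096 - 1) * 4096
           = (10 - 1) * 4096 = 0x9000

   so grow_heap(heap, -0x9000) returns nine pages to the system, leaving a
   top chunk of 0x350 bytes, which is still at least MINSIZE plus pad. */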
static mstate
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena. */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list. */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err) /* locking failed */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}
/* Create a new arena with initial size "size". */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);

  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if(misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}
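/* Worked example (illustrative only, assuming MALLOC_ALIGNMENT = 8 and
   2*SIZE_SZ = 8): if ptr = (char *)(a + 1) is placed so that
   chunk2mem(ptr) = ptr + 8 has misalign = 4 low bits set, the code above
   bumps ptr by MALLOC_ALIGNMENT - misalign = 4 bytes, so the user pointer
   of the top chunk is 8-byte aligned; the top chunk then spans from ptr to
   the end of the heap, (char*)h + h->size. */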
#endif /* USE_ARENAS */