/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,2002,2003,2004,2005,2006,2007
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps. */
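
/* Illustrative sketch (not part of the original source): because every
   heap starts at an address aligned to the power-of-two HEAP_MAX_SIZE,
   the heap owning a chunk is found by masking off the low bits of the
   chunk address, which is exactly what heap_for_ptr() below does.
   Assuming HEAP_MAX_SIZE == 0x100000 (1 MiB), a chunk at 0x40123456
   belongs to the heap starting at 0x40123456 & ~0xfffff == 0x40100000.  */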
#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */

typedef struct _heap_info {
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
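
/* Illustrative arithmetic (assumed values, not from the original source):
   with 4-byte pointers, SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16, the four
   members above occupy 16 bytes and pad has -24 & 15 == 8 elements, so
   sizeof (heap_info) + 2 * SIZE_SZ == 24 + 8 == 32, a multiple of 16 as
   the sanity check requires.  */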
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/

#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
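
/* Illustrative (hypothetical) caller, not part of this file: a public
   allocation entry point typically pairs arena_get with a matching
   unlock, roughly

     mstate ar_ptr;
     Void_t *victim;
     arena_get(ar_ptr, sz);
     victim = ar_ptr ? _int_malloc(ar_ptr, sz) : 0;
     if(ar_ptr)
       (void)mutex_unlock(&ar_ptr->mutex);

   which is the pattern public_mALLOc() in malloc.c follows.  */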
#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */
/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           __const __malloc_ptr_t);
# if !defined _LIBC || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
                                             __const __malloc_ptr_t);
# endif
static void           (*save_free_hook) (__malloc_ptr_t __ptr,
                                         __const __malloc_ptr_t);
static Void_t*        save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)
/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */
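
/* For reference (hedged summary, not part of the original source):
   thread_atfork() hooks into the pthread_atfork-style fork handlers,
   so the registration performed in ptmalloc_init below behaves roughly
   like

     pthread_atfork (ptmalloc_lock_all,     -- prepare: runs before fork()
                     ptmalloc_unlock_all,   -- parent: after fork(), in the parent
                     ptmalloc_unlock_all2); -- child: after fork(), in the child

   ensuring that no arena mutex is held by another thread at the moment
   the address space is duplicated.  */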
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;
  Void_t *my_arena;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter. */
        goto out;

      /* This thread has to wait its turn. */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */
/* Initialization routine. */

extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit. */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
/* Set up basic state so that _int_malloc et al can work. */
static void
ptmalloc_init_minimal (void)
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad        = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize       = malloc_getpagesize;
}
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
# if defined SHARED && !USE___THREAD
/* This is called by __pthread_initialize_minimal when it needs to use
   malloc to set up the TLS state.  We cannot do the full work of
   ptmalloc_init (below) until __pthread_initialize_minimal has finished,
   so it has to switch to using the special startup-time hooks while doing
   those allocations.  */
void
__libc_malloc_pthread_startup (bool first_time)
{
  if (first_time)
    {
      ptmalloc_init_minimal ();
      save_malloc_hook = __malloc_hook;
      save_memalign_hook = __memalign_hook;
      save_free_hook = __free_hook;
      __malloc_hook = malloc_starter;
      __memalign_hook = memalign_starter;
      __free_hook = free_starter;
    }
  else
    {
      __malloc_hook = save_malloc_hook;
      __memalign_hook = save_memalign_hook;
      __free_hook = save_free_hook;
    }
}
# endif
static void
ptmalloc_init (void)
{
  const char* s;
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

# if defined SHARED && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
    ptmalloc_init_minimal();
#ifndef NO_THREADS
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;
#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program. */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif
  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);

  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
#ifdef _LIBC
  secure = __libc_enable_secure;
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below. */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure)
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    mALLOPt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              break;
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
        mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s && s[0]) {
    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
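
/* Usage example (illustrative, shell syntax): the MALLOC_* environment
   variables parsed above tune the allocator at process start-up, e.g.

     MALLOC_TRIM_THRESHOLD_=131072 MALLOC_MMAP_MAX_=0 MALLOC_CHECK_=3 ./app

   In secure (e.g. set-uid) processes most of these settings are ignored,
   as the `secure' checks above show.  */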
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */
#if __STD_C
static void
dump_heap(heap_info *heap)
#else
static void
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
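
/* Worked example (illustrative, assumed addresses): with
   HEAP_MAX_SIZE == 0x100000, mmap (0, 0x200000, ...) might return
   p1 == 0x7f0000080000.  new_heap keeps the aligned slice
   p2 == (p1 + 0xfffff) & ~0xfffff == 0x7f0000100000, unmaps the 0x80000
   bytes before p2 and the 0x80000 bytes beyond p2 + HEAP_MAX_SIZE, and,
   only when p1 happens to be aligned already, records p2 + HEAP_MAX_SIZE
   in aligned_heap_area for the next call to try first.  */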
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

#if __STD_C
static heap_info *
new_heap(size_t size, size_t top_pad)
#else
static heap_info *
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

#if __STD_C
static int
grow_heap(heap_info *h, long diff)
#else
static int
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (mprotect((char *)h + h->mprotect_size,
                 (unsigned long) new_size - h->mprotect_size,
                 PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}
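
/* Illustrative arithmetic (assuming a 4096-byte page): grow_heap rounds
   the requested growth up to a page multiple, so diff == 5000 becomes
   (5000 + 4095) & ~4095 == 8192, and only the part beyond the current
   h->mprotect_size is newly mprotect()ed PROT_READ|PROT_WRITE.  */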
/* Shrink a heap. */

#if __STD_C
static int
shrink_heap(heap_info *h, long diff)
#else
static int
shrink_heap(h, diff) heap_info *h; long diff;
#endif
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
  if (__builtin_expect (__libc_enable_secure, 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                          \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)    \
      aligned_heap_area = NULL;                                 \
    munmap((char*)(heap), HEAP_MAX_SIZE);                       \
  } while (0)
#if __STD_C
static int
heap_trim(heap_info *heap, size_t pad)
#else
static int
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
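
/* Worked example for the shrink computation above (assumed values:
   pagesz == 4096, pad == 0, MINSIZE == 32): with top_size == 100000,
   extra == ((100000 - 0 - 32 + 4095)/4096 - 1) * 4096 == 24 * 4096
   == 98304, so the heap is shrunk by 98304 bytes and the top chunk
   keeps 100000 - 98304 == 1696 bytes, still more than pad + MINSIZE.  */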
/* Create a new arena with initial size "size". */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}
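
/* Illustrative alignment check (assumed values: SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16, so chunk2mem(p) is (char*)p + 16): if ptr ends
   up at an address that is 8 mod 16, chunk2mem(ptr) is also 8 mod 16,
   hence misalign == 8 and ptr is advanced by 16 - 8 == 8 bytes so that
   the user pointer of the top chunk becomes 16-byte aligned.  */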
#if __STD_C
static mstate
arena_get2(mstate a_tsd, size_t size)
#else
static mstate
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     mutex. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);
    retried = true;
    a = a_tsd;
    /* Since we blocked there might be an arena available now. */
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena. */
  a = _int_new_arena(size);
  if(!a) {
    (void)mutex_unlock(&list_lock);
    return 0;
  }
  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list. */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  THREAD_STAT(++(a->stat_lock_loop));

  (void)mutex_unlock(&list_lock);

  return a;
}

#endif /* USE_ARENAS */