/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
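/* Illustrative numbers (HEAP_MAX_SIZE as defined above; the 128*1024
   default mmap threshold is an assumption about malloc.c, not
   something this file controls): a 1 MB heap can hold eight chunks of
   just under 128 KB each, so a burst of near-threshold requests does
   not force a fresh mmap()ed heap per request.  */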
#define THREAD_STATS 0

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */
/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */
typedef struct _heap_info {
  mstate ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly aligned. */
} heap_info;
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/

#if USE_ARENAS
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */
#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
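/* Example of the mask trick in heap_for_ptr(), with HEAP_MAX_SIZE ==
   0x100000 (the 1 MB default above): a chunk at 0x40234567 in a
   non-main heap gives

     0x40234567 & ~0xfffff == 0x40200000,

   the HEAP_MAX_SIZE-aligned start of its heap, i.e. the heap_info
   header.  This only works because new_heap() below guarantees that
   every heap starts at a multiple of HEAP_MAX_SIZE.  */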
#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */
/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */
static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           __const __malloc_ptr_t);
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
                                             __const __malloc_ptr_t);
# endif
static void           (*save_free_hook) (__malloc_ptr_t __ptr,
                                         __const __malloc_ptr_t);
static Void_t*        save_arena;
/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)
/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check() < 0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}
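/* Note on the lock/unlock pair above: list_lock is taken and released
   purely as a barrier.  ptmalloc_lock_all() below acquires list_lock
   for the whole fork window, so any other thread that allocates during
   that window blocks here until ptmalloc_unlock_all() has restored the
   hooks, then retries through the public entry point.  */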
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */
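/* Sketch of the resulting fork() protocol (illustrative summary, not
   part of the original source):

     parent: ptmalloc_lock_all()    - take list_lock and every arena
                                      mutex, install the atfork hooks
     fork()
     parent: ptmalloc_unlock_all()  - restore hooks, release all locks
     child:  ptmalloc_unlock_all2() - restore hooks, re-init all locks

   Holding every mutex across fork() ensures the child never inherits
   a mutex that a thread not duplicated into it held mid-operation.  */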
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In LinuxThreads, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */
/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif /* _LIBC */
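/* Usage sketch (hypothetical environment): if _environ contains
   "MALLOC_TOP_PAD_=1024", then in the initialization loop below

     char **runp = _environ;
     char *envline;
     while ((envline = next_env_entry (&runp)) != NULL)
       ...

   envline is "TOP_PAD_=1024", i.e. the text after the "MALLOC_"
   prefix, and runp is left one past the matching entry so the scan
   resumes there on the next call.  */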
/* Set up basic state so that _int_malloc et al can work.  */
static void
ptmalloc_init_minimal (void)
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad        = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize       = malloc_getpagesize;
}
#ifdef _LIBC
# ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
# endif
# if defined SHARED && defined USE_TLS && !USE___THREAD
/* This is called by __pthread_initialize_minimal when it needs to use
   malloc to set up the TLS state.  We cannot do the full work of
   ptmalloc_init (below) until __pthread_initialize_minimal has finished,
   so it has to switch to using the special startup-time hooks while doing
   those allocations.  */
void
__libc_malloc_pthread_startup (bool first_time)
{
  if (first_time)
    {
      ptmalloc_init_minimal ();
      save_malloc_hook = __malloc_hook;
      save_memalign_hook = __memalign_hook;
      save_free_hook = __free_hook;
      __malloc_hook = malloc_starter;
      __memalign_hook = memalign_starter;
      __free_hook = free_starter;
    }
  else
    {
      __malloc_hook = save_malloc_hook;
      __memalign_hook = save_memalign_hook;
      __free_hook = save_free_hook;
    }
}
# endif
#endif
static void
ptmalloc_init (void)
{
  const char* s;
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef _LIBC
# if defined SHARED && defined USE_TLS && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
#endif
    ptmalloc_init_minimal();
#ifndef NO_THREADS
# if defined _LIBC && defined USE_TLS
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
#  define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;
#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif
  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# ifndef NO_STARTER
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# else
#  undef NO_STARTER
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure)
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    mALLOPt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              break;
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
        mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif /* _LIBC */
  if(s) {
    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */
static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;
  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
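/* Worked example of the double-size mapping used in new_heap() below
   (assuming HEAP_MAX_SIZE == 1 MB): if mmap(0, 2 MB, ...) returns
   p1 = 0x40080000, then

     p2 = (p1 + 0xfffff) & ~0xfffff = 0x40100000,

   the 0x80000 bytes before p2 and the 0x80000 bytes after p2 + 1 MB
   are unmapped again, and the heap occupies [p2, p2 + 1 MB).  If p1
   is already aligned (ul == 0), the second half is unmapped too, but
   its address is remembered in aligned_heap_area for the next call.  */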
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;
  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
/* Grow or shrink a heap.  size is automatically rounded up to a
   multiple of the page size if it is positive. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;
  if(diff >= 0) {
    diff = (diff + page_mask) & ~page_mask;
    new_size = (long)h->size + diff;
    if(new_size > HEAP_MAX_SIZE)
      return -1;
    if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
      return -2;
  } else {
    new_size = (long)h->size + diff;
    if(new_size < (long)sizeof(*h))
      return -1;
    /* Try to re-map the extra heap space freshly to save memory, and
       make it inaccessible. */
    if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
                    MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
      return -2;
    /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
  }
  h->size = new_size;
  return 0;
}
/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                          \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)    \
      aligned_heap_area = NULL;                                 \
    munmap((char*)(heap), HEAP_MAX_SIZE);                       \
  } while (0)
static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;
  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(grow_heap(heap, -extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
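/* Worked example of the `extra' computation above, assuming
   pagesz == 4096, pad == 0 and MINSIZE == 16 (values vary by
   platform): for top_size == 40960,

     extra = ((40960 - 0 - 16 + 4095)/4096 - 1) * 4096
           = (10 - 1) * 4096 = 36864,

   so nine pages are returned and top keeps a single page, which still
   has room for pad plus a minimal chunk.  */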
static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }
  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);
  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }
  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  THREAD_STAT(++(a->stat_lock_loop));

  (void)mutex_unlock(&list_lock);

  return a;
}
/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif
  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}
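/* Alignment example (illustrative; assumes MALLOC_ALIGNMENT == 8 and
   2*SIZE_SZ == 8, as on typical 32-bit targets): chunk2mem(ptr) is
   ptr + 8, so if ptr ends in 0x4 the user pointer would be misaligned
   by misalign == 4 and ptr is advanced by 8 - 4 = 4 bytes before it
   becomes the top chunk.  */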
#endif /* USE_ARENAS */