/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009,2010,2011,2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
#define THREAD_STATS 0

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */
/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
typedef struct _heap_info {
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                             ? -1 : 1];
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;
/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */
#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)
#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)
#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#endif
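
/* Illustrative use of arena_get() (a sketch of how the allocation entry
   points in malloc.c typically drive it, not code compiled here):

     mstate ar_ptr;
     void *victim;

     arena_get(ar_ptr, bytes);             // look up, then lock or create
     victim = _int_malloc(ar_ptr, bytes);  // bytes: memory needed right now
     if (ar_ptr)
       (void)mutex_unlock(&ar_ptr->mutex);
*/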
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
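
/* For illustration (assuming the default HEAP_MAX_SIZE of 1024*1024 and a
   chunk located at 0x7f3a4012f040): since every non-main heap is mmap()ed
   at a HEAP_MAX_SIZE-aligned address, masking off the low bits,

     0x7f3a4012f040 & ~(0x100000-1)  ==  0x7f3a40100000,

   lands directly on the heap_info header of the heap containing the chunk,
   which is exactly what heap_for_ptr() computes.  */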
/**************************************************************************/
/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           const __malloc_ptr_t);
static void           (*save_free_hook) (__malloc_ptr_t __ptr,
                                         const __malloc_ptr_t);
static void*        save_arena;
/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)
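
/* While fork handling is active, ptmalloc_lock_all() below stores this
   magic value as the calling thread's arena pointer.  malloc_atfork() and
   free_atfork() use it to distinguish the thread driving the fork (which
   may still allocate from main_arena) from all other threads, which are
   made to block on list_lock instead.  */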
/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;
/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */
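
/* Sketch of the sequence around fork() once these handlers are registered
   (illustrative only; the registration itself happens in ptmalloc_init):

     ptmalloc_lock_all();      parent, before fork: lock list_lock and
                               every arena, install the atfork hooks
     fork();
     ptmalloc_unlock_all();    parent, after fork: restore hooks, unlock
     ptmalloc_unlock_all2();   child: re-initialize the mutexes instead
                               of unlocking them

   so that no arena mutex can be inherited by the child in a locked state.  */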
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child.  */
static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  free_list = NULL;
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}
#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif
/* Initialization routine. */
#include <string.h>
extern char **_environ;
static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
                }
              break;
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0])
    {
      __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init();
    }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
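
/* For illustration: with MALLOC_TOP_PAD_=131072 in the environment,
   next_env_entry() yields envline = "TOP_PAD_=131072", len is 8, and the
   corresponding case above executes __libc_mallopt(M_TOP_PAD, 131072).
   An entry such as "MALLOC_TOP_PAD_" with no '=' is skipped by the check
   on envline[len].  */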
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all,		      \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */
static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
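
/* Sketch of the alignment trick used by new_heap() below, with purely
   illustrative addresses and the default HEAP_MAX_SIZE of 0x100000:

     p1 = MMAP(0, HEAP_MAX_SIZE<<1, ...)           might return 0x7f3a3ff40000
     p2 = (p1 + HEAP_MAX_SIZE-1) & ~(HEAP_MAX_SIZE-1)   ->     0x7f3a40000000

   The slop below p2 and above p2+HEAP_MAX_SIZE is munmap()ed again; only
   when the double-sized mapping happens to be aligned already is its second
   half recorded in aligned_heap_area as a hint for the next call.  */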
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;
  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;
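
  /* Example of the clamping above (illustrative numbers, assuming a
     4096-byte page): size = 10000 and top_pad = 131072 give
     size+top_pad = 141072, which lies between HEAP_MIN_SIZE and
     HEAP_MAX_SIZE, so size becomes 141072 and is then rounded up to
     143360, the next multiple of the page size.  */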
  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
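
/* On success, new_heap() returns a heap_info * whose first `size' bytes
   are readable and writable; the remainder of the HEAP_MAX_SIZE
   reservation stays PROT_NONE until grow_heap() extends mprotect_size.  */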
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */
static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}
/* Shrink a heap.  */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
  if (__builtin_expect (__libc_enable_secure, 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}
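
/* Note on the two paths above: when __libc_enable_secure is set, the freed
   tail is re-mapped PROT_NONE with MAP_FIXED so its old contents are
   discarded immediately and mprotect_size is reduced accordingly;
   otherwise madvise(MADV_DONTNEED) merely tells the kernel the pages may be
   dropped while the mapping itself stays intact and writable.  */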
/* Delete a heap. */

#define delete_heap(heap) \
  do {								\
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
      aligned_heap_area = NULL;					\
    __munmap((char*)(heap), HEAP_MAX_SIZE);			\
  } while (0)
static int
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;
  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;
  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
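
  /* Resulting layout of a fresh non-main heap (illustrative):

       h                   heap_info header
       h + 1   (== a)      struct malloc_state for the new arena
       a + 1   -> ptr      rounded up so chunk2mem(ptr) is
                           MALLOC_ALIGNMENT-aligned; becomes top(a)
       ...                 remainder of the heap, owned by the top chunk
       (char*)h + h->size  end of the currently mprotect()ed region  */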
  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

  (void)mutex_lock(&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  (void)mutex_unlock(&list_lock);

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
static mstate
reused_arena (void)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
static mstate
arena_get2(mstate a_tsd, size_t size)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n  = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena ();
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);