/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
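/* Worked example (added for illustration, not in the original source): with
   HEAP_MAX_SIZE == 1024*1024, a chunk at address 0x7f12345678 lies in the
   heap that starts at

     0x7f12345678 & ~(0x100000UL - 1) == 0x7f12300000

   i.e. the owning heap_info is found by masking off the low bits of the
   chunk address, which is what the heap_for_ptr() macro below computes in
   constant time.  */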
#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/
#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;

static size_t narenas = 1;
static mstate free_list;
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)
#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)
#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
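/* Illustrative sketch only (disabled; simplified from the real callers in
   malloc.c): an allocation path selects and locks an arena with arena_get(),
   while a deallocation path recovers the owning arena from the chunk address
   via arena_for_chunk().  Error handling and retries are omitted.  */
#if 0
static void *
alloc_sketch (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  arena_get (ar_ptr, bytes);            /* find and lock some arena */
  if (ar_ptr == NULL)
    return NULL;
  victim = _int_malloc (ar_ptr, bytes);
  (void) mutex_unlock (&ar_ptr->mutex);
  return victim;
}

static void
free_sketch (void *mem)
{
  mchunkptr p = mem2chunk (mem);
  mstate ar_ptr = arena_for_chunk (p);  /* constant-time owner lookup */
  _int_free (ar_ptr, p, 0);             /* 0: caller does not hold the lock */
}
#endif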
/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */
static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           const __malloc_ptr_t);
static void           (*save_free_hook) (__malloc_ptr_t __ptr,
                                         const __malloc_ptr_t);
static void*        save_arena;
/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)
/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;
/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */
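/* Illustrative sketch only (disabled; the helper below is hypothetical and
   not part of this file): the registration performed in ptmalloc_init()
   through the internal thread_atfork() wrapper is equivalent to this
   standard pthread_atfork() call.  */
#if 0
static void
register_fork_handlers_sketch (void)
{
  pthread_atfork (ptmalloc_lock_all,      /* prepare: lock list_lock and all arenas */
                  ptmalloc_unlock_all,    /* parent: release the locks again */
                  ptmalloc_unlock_all2);  /* child: re-initialize the locks */
}
#endif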
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  free_list = NULL;
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}
#else

# define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif  /* !NO_THREADS */
/* Initialization routine. */

#include <string.h>

extern char **_environ;
static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
                }
              break;
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0])
    {
      __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init();
    }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */
static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
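/* Minimal sketch of the over-map-and-trim trick used by new_heap() below
   (disabled; assumes <sys/mman.h> and <stdint.h>, and uses plain mmap()
   rather than the MMAP() wrapper): reserve twice the alignment, keep the
   aligned middle portion and unmap the rest.  */
#if 0
static void *
map_aligned_sketch (size_t align)       /* align must be a power of two */
{
  char *p1 = mmap (0, align << 1, PROT_NONE,
                   MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;
  char *p2 = (char *) (((uintptr_t) p1 + (align - 1)) & ~(align - 1));
  size_t head = p2 - p1;
  if (head)
    munmap (p1, head);                  /* unused low part */
  munmap (p2 + align, align - head);    /* unused high part */
  return p2;                            /* aligned to and of length ALIGN */
}
#endif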
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}
/* Shrink a heap. */

static int
internal_function
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__builtin_expect (check_may_shrink_heap (), 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}
/* Delete a heap. */

#define delete_heap(heap) \
  do {								\
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
      aligned_heap_area = NULL;					\
    __munmap((char*)(heap), HEAP_MAX_SIZE);			\
  } while (0)
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

  (void)mutex_lock(&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  (void)mutex_unlock(&list_lock);

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
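      /* Worked example (added for illustration): with arena_max unset and
         arena_test == 8, narenas_limit stays 0 while narenas <= 8, so
         narenas_limit - 1 underflows to SIZE_MAX and the test below always
         passes, permitting a new arena.  Once the limit has been computed,
         say 16, the test becomes n <= 15 and arena creation stops there.  */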
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  if(ar_ptr != &main_arena) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = &main_arena;
    (void)mutex_lock(&ar_ptr->mutex);
  } else {
    /* Grab ar_ptr->next prior to releasing its lock.  */
    mstate prev = ar_ptr->next ? ar_ptr : 0;
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = arena_get2(prev, bytes, ar_ptr);
  }

  return ar_ptr;
}
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);