/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
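/* Illustration (added for exposition, not from the original source): with
   the usual DEFAULT_MMAP_THRESHOLD_MAX settings in malloc.c this typically
   works out to a HEAP_MAX_SIZE of 64 MiB on 64-bit targets and 1 MiB on
   32-bit ones, while HEAP_MIN_SIZE stays at 32 KiB.  The power-of-two
   requirement is what lets heap_for_ptr() below locate a chunk's heap with
   a single mask operation.  */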
#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/
#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
typedef struct _heap_info
{
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
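/* Worked example (added for exposition; assumes a typical 64-bit build
   where SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MALLOC_ALIGN_MASK == 15):
   the four members above occupy 32 bytes, the pad array gets
   -6 * SIZE_SZ & MALLOC_ALIGN_MASK == -48 & 15 == 0 bytes, and
   (sizeof (heap_info) + 2 * SIZE_SZ) % MALLOC_ALIGNMENT == 48 % 16 == 0,
   so the sanity-check array above gets the legal size 1.  On a
   configuration where the raw struct size were not already aligned, pad
   would grow it until the check holds.  */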
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
static size_t narenas = 1;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif
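/* Usage sketch (added for exposition; the code and variable names below
   mirror how the allocation entry points in malloc.c typically drive these
   macros and are not part of this file):

     mstate ar_ptr;
     void *victim;
     arena_get(ar_ptr, nb);      - look up this thread's arena and lock it
     if (ar_ptr != NULL) {
       victim = _int_malloc(ar_ptr, nb);
       (void)mutex_unlock(&ar_ptr->mutex);
     }
*/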
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
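/* Worked example (added for exposition): with HEAP_MAX_SIZE == 0x4000000
   (64 MiB), a chunk at, say, 0x7f3a5c1e2f40 inside a non-main heap maps to
   0x7f3a5c1e2f40 & ~0x3ffffff == 0x7f3a5c000000, i.e. the heap_info header
   at the start of its heap, because every heap is mmap()ed at an address
   aligned to HEAP_MAX_SIZE.  Chunks of the main arena never carry the
   NON_MAIN_ARENA bit, so arena_for_chunk() falls back to &main_arena for
   them.  */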
/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static void *(*save_malloc_hook) (size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */
static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;
/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child.  */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

# define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif  /* !NO_THREADS */
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0])
    {
      __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init();
    }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
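/* Illustration (added for exposition, not from the original source):
   suppose HEAP_MAX_SIZE is 64 MiB (0x4000000) and the PROT_NONE
   reservation of twice that size comes back at p1 == 0x7f3a5a400000.
   Then p2 = (p1 + HEAP_MAX_SIZE-1) & ~(HEAP_MAX_SIZE-1) == 0x7f3a5c000000
   and ul = p2 - p1 == 0x1c00000 (28 MiB); the 28 MiB head and the 36 MiB
   tail are unmapped again, leaving exactly one aligned HEAP_MAX_SIZE
   window, which is then mprotect()ed read/write only as far as the heap
   actually grows.  When ul happens to be 0, the second aligned half of the
   reservation is remembered in aligned_heap_area for the next call.  */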
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}
/* Shrink a heap.  */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__builtin_expect (check_may_shrink_heap (), 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}
/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                          \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)    \
      aligned_heap_area = NULL;                                 \
    __munmap((char*)(heap), HEAP_MAX_SIZE);                     \
  } while (0)
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif
static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
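      /* Worked example (added for exposition): while narenas has not yet
         exceeded mp_.arena_test, narenas_limit stays 0, so narenas_limit - 1
         wraps around to SIZE_MAX and the test below always allows another
         arena.  Once the limit is computed - e.g. 16 if NARENAS_FROM_NCORES
         yields 8 arenas per core on a 64-bit machine with two cores - new
         arenas are created only while narenas <= 15, and reused_arena() is
         picked otherwise.  */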
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  if(ar_ptr != &main_arena) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = &main_arena;
    (void)mutex_lock(&ar_ptr->mutex);
  } else {
    /* Grab ar_ptr->next prior to releasing its lock.  */
    mstate prev = ar_ptr->next ? ar_ptr : 0;
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = arena_get2(prev, bytes, ar_ptr);
  }

  return ar_ptr;
}
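/* Usage sketch (added for exposition; the variable names below follow the
   allocation wrappers in malloc.c and are not defined in this file):

     victim = _int_malloc(ar_ptr, bytes);
     if (!victim && ar_ptr != NULL) {
       ar_ptr = arena_get_retry(ar_ptr, bytes);
       if (ar_ptr != NULL) {
         victim = _int_malloc(ar_ptr, bytes);
         (void)mutex_unlock(&ar_ptr->mutex);
       }
     }
*/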
#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif