/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
#define THREAD_STATS 0

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/
#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
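/* Added commentary (not in the original source): the declaration above is a
   pre-C11 compile-time assertion.  If the padding is wrong, the expression
   (sizeof (heap_info) + 2 * SIZE_SZ) % MALLOC_ALIGNMENT is non-zero and the
   array gets the invalid size -1, so the build fails; otherwise it gets the
   harmless size 1.  A rough modern equivalent, assuming a C11 compiler,
   would be:

     _Static_assert ((sizeof (heap_info) + 2 * SIZE_SZ) % MALLOC_ALIGNMENT == 0,
                     "heap_info padding is wrong");
*/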
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */
#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif
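/* Illustrative usage (added commentary, not part of the original source):
   a caller in malloc.c obtains and locks an arena roughly like

     mstate ar_ptr;
     arena_get (ar_ptr, bytes);    -- ar_ptr is now locked, or NULL
     if (ar_ptr)
       {
         ... allocate from ar_ptr ...
         (void) mutex_unlock (&ar_ptr->mutex);
       }

   where "bytes" is the request size, used only as a sizing hint when a
   new arena has to be created.  */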
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
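/* Worked example (added commentary, not in the original source): with the
   default HEAP_MAX_SIZE of 1 MiB (0x100000), every non-main heap starts at
   an address that is a multiple of HEAP_MAX_SIZE, so masking off the low
   20 bits of any chunk address inside it recovers the heap_info header:

     0x7f3a4567c8 & ~(0x100000 - 1)  ==  0x7f3a400000

   arena_for_chunk() then follows heap_for_ptr(ptr)->ar_ptr, except for
   chunks in the main arena, which are not part of any mmap()ed heap.  */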
/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static void *(*save_malloc_hook) (size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;
/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)
/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */
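/* Added commentary (not in the original source): ptmalloc_init below
   registers these handlers as

     thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all,
                    ptmalloc_unlock_all2);

   so before fork() the parent takes list_lock and every arena mutex and
   redirects the hooks to malloc_atfork/free_atfork; after fork() the
   parent releases everything again, while the child re-initializes the
   mutexes instead of unlocking them (see ptmalloc_unlock_all2 below).  */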
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child.  */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  free_list = NULL;
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

# define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !NO_THREADS */
/* Initialization routine. */

extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}
extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program. */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
                }
              break;
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0])
    {
      __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init();
    }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
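/* Added commentary (not in the original source): new_heap below reserves
   2 * HEAP_MAX_SIZE of address space and keeps only a HEAP_MAX_SIZE-aligned
   portion of it.  With the default HEAP_MAX_SIZE of 1 MiB, if the large
   mapping starts at p1 = 0x7f0000080000, the aligned heap start is

     p2 = (p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1) = 0x7f0000100000

   and the slack before p2 and after p2 + HEAP_MAX_SIZE is munmap()ed again;
   if there was no slack in front, the second aligned half is remembered in
   aligned_heap_area for the next call.  */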
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Shrink a heap.  */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__builtin_expect (check_may_shrink_heap (), 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {								\
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
      aligned_heap_area = NULL;					\
    __munmap((char*)(heap), HEAP_MAX_SIZE);			\
  } while (0)
static int
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

  (void)mutex_lock(&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  (void)mutex_unlock(&list_lock);

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
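/* Added commentary (not in the original source): the resulting layout of a
   non-main arena inside its first heap is

     heap start (HEAP_MAX_SIZE-aligned)
       heap_info            header for this heap
       struct malloc_state  the arena itself (h->ar_ptr == (mstate)(h+1))
       top chunk            first suitably aligned spot after the arena,
                            covering the rest of h->size

   which is why heap_for_ptr()/arena_for_chunk() can recover the arena from
   any chunk address by masking.  */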
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void)mutex_lock(&result->mutex);

 out:
  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
static mstate
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
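      /* Added commentary (not in the original source): concretely, while
         narenas_limit is still 0 the subtraction wraps around to SIZE_MAX,
         so "n <= narenas_limit - 1" always holds and a new arena may be
         created.  Once narenas_limit has been set to some positive value N,
         the test fails as soon as n reaches N, and reused_arena() is taken
         instead.  */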
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      LIBC_PROBE (memory_arena_reuse, 2, a, a_tsd);
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    LIBC_PROBE (memory_arena_reuse_wait, 3, &list_lock, NULL, a_tsd);
    (void)mutex_lock(&list_lock);
    retried = true;
    /* Since we blocked there might be an arena available now.  */
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if(ar_ptr != &main_arena) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = &main_arena;
    (void)mutex_lock(&ar_ptr->mutex);
  } else {
    /* Grab ar_ptr->next prior to releasing its lock.  */
    mstate prev = ar_ptr->next ? ar_ptr : 0;
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = arena_get2(prev, bytes, ar_ptr);
  }

  return ar_ptr;
}
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);