/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */
#include <stdbool.h>
#include <setvmaname.h>

#define TUNABLE_NAMESPACE malloc
#include <elf/dl-tunables.h>
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
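/* Illustrative note (an addition, not part of the original comment): with
   the usual LP64 configuration, DEFAULT_MMAP_THRESHOLD_MAX is 32 MiB, so
   HEAP_MAX_SIZE is 64 MiB.  Because every non-main heap starts at an
   address aligned to HEAP_MAX_SIZE, the heap owning a chunk can be found
   by masking off the low address bits, roughly

     heap_info *h = (heap_info *) ((uintptr_t) ptr & ~(HEAP_MAX_SIZE - 1));

   which is what heap_for_ptr below does via PTR_ALIGN_DOWN.  */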
/* When huge pages are used to create new arenas, the maximum and minimum
   size are based on the runtime defined huge page size.  */

static inline size_t
heap_min_size (void)
{
  return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
}

static inline size_t
heap_max_size (void)
{
  return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4;
}
/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  size_t pagesize;          /* Page size used when allocating the arena.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-3 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
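/* Worked example (an illustrative addition, assuming a typical LP64 build
   with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16): the five members above
   occupy 40 bytes and pad occupies (-3 * 8) & 15 == 8 bytes, so
   sizeof (heap_info) == 48 and 48 + 2 * SIZE_SZ == 64, a multiple of
   MALLOC_ALIGNMENT as required.  */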
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);

static size_t narenas = 1;
static mstate free_list;
/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
static bool __malloc_initialized = false;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)
#define arena_lock(ptr, size) do { \
      if (ptr) \
        __libc_lock_lock (ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
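/* Usage sketch (an illustrative addition, not part of this file): a caller
   such as __libc_malloc in malloc.c uses these helpers roughly as follows,
   retrying via arena_get_retry when allocation in the first arena fails:

     arena_get (ar_ptr, bytes);
     victim = _int_malloc (ar_ptr, bytes);
     if (victim == NULL && ar_ptr != NULL)
       {
         ar_ptr = arena_get_retry (ar_ptr, bytes);
         victim = _int_malloc (ar_ptr, bytes);
       }
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);  */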
/* find the heap and corresponding arena for a given ptr */

static inline heap_info *
heap_for_ptr (void *ptr)
{
  size_t max_size = heap_max_size ();
  return PTR_ALIGN_DOWN (ptr, max_size);
}

static inline struct malloc_state *
arena_for_chunk (mchunkptr ptr)
{
  return chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr;
}


/**************************************************************************/
/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
__malloc_fork_lock_parent (void)
{
  if (!__malloc_initialized)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}
void
__malloc_fork_unlock_parent (void)
{
  if (!__malloc_initialized)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}
void
__malloc_fork_unlock_child (void)
{
  if (!__malloc_initialized)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}
#define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}

TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
TUNABLE_CALLBACK_FNDECL (set_hugetlb, size_t)
#if USE_TCACHE
static void tcache_key_initialize (void);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized)
    return;

  __malloc_initialized = true;

#if USE_TCACHE
  tcache_key_initialize ();
#endif

#ifdef USE_MTAG
  if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
    {
      /* If the tunable says that we should be using tagged memory
         and that morecore does not support tagged regions, then
         disable it.  */
      if (__MTAG_SBRK_UNTAGGED)
        __always_fail_morecore = true;

      mtag_enabled = true;
      mtag_mmap_flags = __MTAG_MMAP_FLAGS;
    }
#endif
#if defined SHARED && IS_IN (libc)
  /* In case this libc copy is in a non-default namespace, never use
     brk.  Likewise if dlopened from statically linked program.  The
     generic sbrk implementation also enforces this, but it is not
     used on Hurd.  */
  if (!__libc_initial)
    __always_fail_morecore = true;
#endif
  thread_arena = &main_arena;

  malloc_init_state (&main_arena);

  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
#if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
#endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
  TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb));
  if (mp_.hp_pagesize > 0)
    /* Force mmap for main arena instead of sbrk, so hugepages are explicitly
       used.  */
    __always_fail_morecore = true;
}
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((uintptr_t) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask (p));
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (chunksize_nomask (p) == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
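/* Worked example (an illustrative addition, with HEAP_MAX_SIZE == 64 MiB):
   alloc_new_heap below maps 2 * 64 MiB of PROT_NONE memory, rounds the
   start up to the next 64 MiB boundary and unmaps the excess on both
   sides, leaving one aligned heap.  If the mapping happens to be aligned
   already, the second 64 MiB half is unmapped but its address is recorded
   in aligned_heap_area so the next heap can be mapped there directly.  */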
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
                int mmap_flags)
{
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;
  size_t min_size = heap_min_size ();
  size_t max_size = heap_max_size ();

  if (size + top_pad < min_size)
    size = min_size;
  else if (size + top_pad <= max_size)
    size += top_pad;
  else if (size > max_size)
    return 0;
  else
    size = max_size;
  size = ALIGN_UP (size, pagesize);
  /* A memory region aligned to a multiple of max_size is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, max_size, PROT_NONE, mmap_flags);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (max_size - 1)))
        {
          __munmap (p2, max_size);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, max_size << 1, PROT_NONE, mmap_flags);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((uintptr_t) p1 + (max_size - 1))
                         & ~(max_size - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + max_size;
          __munmap (p2 + max_size, max_size - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only max_size
             is already aligned. */
          p2 = (char *) MMAP (0, max_size, PROT_NONE, mmap_flags);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (max_size - 1))
            {
              __munmap (p2, max_size);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, max_size);
      return 0;
    }

  /* Only consider the actual usable range.  */
  __set_vma_name (p2, size, " glibc: malloc arena");

  madvise_thp (p2, size);

  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  h->pagesize = pagesize;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
static heap_info *
new_heap (size_t size, size_t top_pad)
{
  if (__glibc_unlikely (mp_.hp_pagesize != 0))
    {
      heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize,
                                     mp_.hp_flags);
      if (h != NULL)
        return h;
    }
  return alloc_new_heap (size, top_pad, GLRO (dl_pagesize), 0);
}
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = h->pagesize;
  size_t max_size = heap_max_size ();
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) max_size)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
/* Delete a heap. */

static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;
  size_t max_size = heap_max_size ();

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < max_size);
      if (new_size + (max_size - prev_heap->size) < pad + MINSIZE
                                                    + heap->pagesize)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      if ((char *) heap + max_size == aligned_heap_area)
        aligned_heap_area = NULL;
      __munmap (heap, max_size);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (heap->pagesize - 1))
              == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }
  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, heap->pagesize);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (uintptr_t) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}
/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}
/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs_sched ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
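      /* Worked example (an illustrative addition): while no limit has been
         computed yet, narenas_limit is still 0, so narenas_limit - 1 wraps
         around to SIZE_MAX and the test below always passes, allowing
         another arena.  Once narenas_limit is, say, 8, the test n <= 7
         only passes while fewer than 8 arenas exist.  */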
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}