[glibc.git] / malloc / arena.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */
#include <stdbool.h>

#define TUNABLE_NAMESPACE malloc
#include <elf/dl-tunables.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */

/* When huge pages are used to create new arenas, the maximum and minimum
   size are based on the runtime defined huge page size.  */

static inline size_t
heap_min_size (void)
{
  return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
}

static inline size_t
heap_max_size (void)
{
  return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4;
}
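
/* Editorial worked example (not part of the original source): with the
   defaults above, heaps are between 32 KiB and HEAP_MAX_SIZE in size; when a
   huge page size has been configured, e.g. mp_.hp_pagesize == 2 MiB,
   heap_min_size () returns 2 MiB and heap_max_size () returns 8 MiB, so each
   heap is sized and aligned in huge-page units.  */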

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  size_t pagesize;          /* Page size used when allocating the arena.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-3 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
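
/* Editorial worked example for the pad computation above (assuming a 64-bit
   configuration with SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and therefore
   MALLOC_ALIGN_MASK == 15): the five members before the pad occupy 40 bytes,
   and -3 * SIZE_SZ == -24, so (-24 & 15) == 8 and sizeof (heap_info) == 48.
   Adding 2 * SIZE_SZ gives 64, a multiple of MALLOC_ALIGNMENT, which is
   exactly what the sanity_check_heap_info_alignment declaration above
   verifies at compile time.  */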

/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
#if IS_IN (libc)
static size_t narenas = 1;
#endif
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
static bool __malloc_initialized = false;

/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr) \
        __libc_lock_lock (ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
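
/* Editorial sketch (not part of this file): how an allocation path uses the
   macros above together with arena_get_retry.  This mirrors the pattern in
   __libc_malloc but is simplified and kept here only as an illustration.  */
#if 0
static void *
example_allocate (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  arena_get (ar_ptr, bytes);            /* Lock thread_arena or create one.  */
  victim = _int_malloc (ar_ptr, bytes);
  if (victim == NULL && ar_ptr != NULL)
    {
      /* Allocation failed; retry on the main arena or another arena.  */
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }
  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex);
  return victim;
}
#endif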

/* find the heap and corresponding arena for a given ptr */

static inline heap_info *
heap_for_ptr (void *ptr)
{
  size_t max_size = heap_max_size ();
  return PTR_ALIGN_DOWN (ptr, max_size);
}

static inline struct malloc_state *
arena_for_chunk (mchunkptr ptr)
{
  return chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr;
}
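
/* Editorial note: because every mmap'ed heap starts at an address aligned to
   heap_max_size (), PTR_ALIGN_DOWN above amounts to masking off the low bits
   of the chunk address, roughly

     heap_info *h = (heap_info *) ((uintptr_t) ptr & ~(max_size - 1));

   which is why the maximum heap size must be a power of two.  */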

/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
__malloc_fork_lock_parent (void)
{
  if (!__malloc_initialized)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
__malloc_fork_unlock_parent (void)
{
  if (!__malloc_initialized)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}

void
__malloc_fork_unlock_child (void)
{
  if (!__malloc_initialized)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}
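
/* Editorial sketch (simplified, assumed call sequence): fork in a
   multi-threaded process brackets the actual clone roughly as

     __malloc_fork_lock_parent ();     // parent: list_lock, then every arena mutex
     pid = <fork system call>;
     if (pid == 0)
       __malloc_fork_unlock_child ();  // child: re-init locks, rebuild free_list
     else
       __malloc_fork_unlock_parent (); // parent: drop arena mutexes, then list_lock

   so the child never inherits a mutex held by a thread that no longer exists
   after fork.  */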

#define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}

TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
TUNABLE_CALLBACK_FNDECL (set_hugetlb, size_t)

#if USE_TCACHE
static void tcache_key_initialize (void);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized)
    return;

  __malloc_initialized = true;

#if USE_TCACHE
  tcache_key_initialize ();
#endif

#ifdef USE_MTAG
  if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
    {
      /* If the tunable says that we should be using tagged memory
         and that morecore does not support tagged regions, then
         disable it.  */
      if (__MTAG_SBRK_UNTAGGED)
        __always_fail_morecore = true;

      mtag_enabled = true;
      mtag_mmap_flags = __MTAG_MMAP_FLAGS;
    }
#endif

#if defined SHARED && IS_IN (libc)
  /* In case this libc copy is in a non-default namespace, never use
     brk.  Likewise if dlopened from statically linked program.  The
     generic sbrk implementation also enforces this, but it is not
     used on Hurd.  */
  if (!__libc_initial)
    __always_fail_morecore = true;
#endif

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);

  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
  TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb));
  if (mp_.hp_pagesize > 0)
    /* Force mmap for main arena instead of sbrk, so hugepages are explicitly
       used.  */
    __always_fail_morecore = true;
}
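
/* Editorial note: the TUNABLE_GET calls above are what connect the allocator
   to the GLIBC_TUNABLES environment variable.  For example (illustrative
   values only):

     GLIBC_TUNABLES=glibc.malloc.arena_max=2:glibc.malloc.mmap_threshold=131072

   set in the environment of a process causes set_arena_max and
   set_mmap_threshold to run here, before the first allocation.  */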

/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr.  */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((uintptr_t) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask (p));
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (chunksize_nomask (p) == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size.  */

static heap_info *
alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
                int mmap_flags)
{
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;
  size_t min_size = heap_min_size ();
  size_t max_size = heap_max_size ();

  if (size + top_pad < min_size)
    size = min_size;
  else if (size + top_pad <= max_size)
    size += top_pad;
  else if (size > max_size)
    return 0;
  else
    size = max_size;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of max_size is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, max_size, PROT_NONE, mmap_flags);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (max_size - 1)))
        {
          __munmap (p2, max_size);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, max_size << 1, PROT_NONE, mmap_flags);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((uintptr_t) p1 + (max_size - 1))
                         & ~(max_size - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + max_size;
          __munmap (p2 + max_size, max_size - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only max_size
             is already aligned. */
          p2 = (char *) MMAP (0, max_size, PROT_NONE, mmap_flags);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (max_size - 1))
            {
              __munmap (p2, max_size);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, max_size);
      return 0;
    }

  madvise_thp (p2, size);

  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  h->pagesize = pagesize;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
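
/* Editorial sketch of the alignment technique used above, shown standalone
   (hypothetical code, assuming POSIX mmap/munmap): map twice the required
   size, keep the aligned half, and unmap the slack on both sides.  */
#if 0
#include <stdint.h>
#include <sys/mman.h>

static void *
map_aligned (size_t size)   /* SIZE must be a power of two.  */
{
  char *p1 = mmap (NULL, size << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;
  /* Round up to the next SIZE boundary inside the double-sized mapping.  */
  char *p2 = (char *) (((uintptr_t) p1 + (size - 1)) & ~(size - 1));
  if (p2 > p1)
    munmap (p1, p2 - p1);                 /* Leading slack.  */
  munmap (p2 + size, size - (p2 - p1));   /* Trailing slack.  */
  return p2;
}
#endif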

static heap_info *
new_heap (size_t size, size_t top_pad)
{
  if (__glibc_unlikely (mp_.hp_pagesize != 0))
    {
      heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize,
                                     mp_.hp_flags);
      if (h != NULL)
        return h;
    }
  return alloc_new_heap (size, top_pad, GLRO (dl_pagesize), 0);
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = h->pagesize;
  size_t max_size = heap_max_size ();
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) max_size)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;
  size_t max_size = heap_max_size ();

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < max_size);
      if (new_size + (max_size - prev_heap->size) < pad + MINSIZE
                                                    + heap->pagesize)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      if ((char *) heap + max_size == aligned_heap_area)
        aligned_heap_area = NULL;
      __munmap (heap, max_size);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (heap->pagesize - 1))
              == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, heap->pagesize);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
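
/* Editorial worked example for the trimming arithmetic above (assumed
   numbers): with a 4 KiB page size, pad == 0 and a 200 KiB top chunk,
   top_size exceeds the default mp_.trim_threshold of 128 KiB,
   top_area = top_size - MINSIZE - 1, and extra = ALIGN_DOWN (top_area, 4096),
   i.e. about 196 KiB are handed back via shrink_heap while the remainder
   stays as the arena's top chunk.  */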

/* Create a new arena with initial size "size".  */

#if IS_IN (libc)
/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (uintptr_t) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}

/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.   */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs_sched ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
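
/* Editorial note on the narenas_limit - 1 test above: while neither arena_max
   nor the arena_test threshold has taken effect, narenas_limit is still 0, so
   narenas_limit - 1 wraps around to SIZE_MAX and the comparison
   n <= narenas_limit - 1 always succeeds, allowing new arenas to be created.
   Once a limit such as NARENAS_FROM_NCORES (n) is established (8 arenas per
   core on 64-bit by default), the same comparison caps the arena count.  */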

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
#endif

void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */