/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */
#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
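
/* Illustrative note (added): on a typical 64-bit configuration with
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MALLOC_ALIGN_MASK == 15, the
   pad array above has size (-6 * 8) & 15 == 0 and sizeof (heap_info) is
   32, so sizeof (heap_info) + 2 * SIZE_SZ == 48, a multiple of
   MALLOC_ALIGNMENT, which is exactly what the compile-time check above
   verifies.  */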
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;
/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr) \
        __libc_lock_lock (ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
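
/* Illustrative sketch (added, not part of the original file): this is
   roughly how __libc_malloc in malloc.c drives the macros above; the
   retry path through arena_get_retry is omitted and `bytes' stands for
   the request size.  */
#if 0
  mstate ar_ptr;
  void *victim;

  arena_get (ar_ptr, bytes);             /* pick and lock an arena */
  victim = _int_malloc (ar_ptr, bytes);  /* allocate from it */
  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex);
#endif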
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
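
/* Illustrative note (added): because every mmap()ed heap starts at an
   address aligned to HEAP_MAX_SIZE, heap_for_ptr only needs to clear the
   low bits of a chunk address.  For example, assuming HEAP_MAX_SIZE is
   64 MiB (1 << 26), a chunk at 0x7f1234567890 maps to the heap_info at
   0x7f1234000000, and arena_for_chunk reads ->ar_ptr from that header.
   Chunks that belong to the main arena are recognized first via the flag
   bit tested by chunk_main_arena, since the main arena's sbrk heap is not
   aligned this way.  */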
/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */
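
/* Note (added for clarity): glibc's fork implementation is expected to
   call __malloc_fork_lock_parent just before forking,
   __malloc_fork_unlock_parent in the parent afterwards, and
   __malloc_fork_unlock_child in the new child, so every arena mutex is
   either held across the fork or reinitialized.  */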
void
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}
void
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}
void
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}
static void
TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
  int32_t value = (int32_t) valp->numval;
  if (value != 0)
    __malloc_check_init ();
}

# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}
TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
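
/* Illustrative sketch (added): for one of the lines above, e.g.
   TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t), the macro expands
   to roughly the following glue function, which unpacks the tunable value
   and forwards it to the matching do_* helper defined in malloc.c.  */
#if 0
static inline int do_set_mmap_threshold (size_t value);
static void
TUNABLE_CALLBACK (set_mmap_threshold) (tunable_val_t *valp)
{
  size_t value = (size_t) (valp)->numval;
  do_set_mmap_threshold (value);
}
#endif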
/* Initialization routine. */
extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);
  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;
          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0] != '\0' && s[0] != '0')
    __malloc_check_init ();
#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
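
/* Illustrative worked example (added): new_heap below normally maps
   2 * HEAP_MAX_SIZE and keeps only the aligned half.  Assuming
   HEAP_MAX_SIZE is 64 MiB, if mmap returns p1 = 0x7f1237000000, then
   p2 = (p1 + HEAP_MAX_SIZE - 1) & ~(HEAP_MAX_SIZE - 1) = 0x7f1238000000;
   the ul = p2 - p1 = 16 MiB in front and the HEAP_MAX_SIZE - ul = 48 MiB
   behind the heap are unmapped again.  If p1 happens to be aligned
   already (ul == 0), the address p2 + HEAP_MAX_SIZE is remembered in
   aligned_heap_area so the next call can try to map exactly there.  */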
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }

  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)
static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long)(top_size) <
      (unsigned long)(mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN(top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}
/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}
/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
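      /* Worked example (added for clarity, assuming the usual defaults of
         arena_max == 0 and arena_test == 8 on 64-bit): while narenas is
         still at most 8, narenas_limit stays 0, so narenas_limit - 1
         underflows to SIZE_MAX and the test below always succeeds,
         creating a new arena.  Once narenas exceeds arena_test, the limit
         is computed from the core count and the test only succeeds while
         narenas is below it; otherwise an existing arena is reused.  */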
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}