/* glibc.git: malloc/arena.c */
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
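/* Illustration only: with the DEFAULT_MMAP_THRESHOLD_MAX commonly defined
   in malloc.c as 4 * 1024 * 1024 * sizeof (long), HEAP_MAX_SIZE works out
   to 32 MiB on 32-bit and 64 MiB on 64-bit targets; the definition in the
   malloc.c actually in use is authoritative.  */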

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
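/* Sketch of the arithmetic behind the pad member above, assuming the
   usual configuration in which pointers and size_t are both SIZE_SZ
   bytes wide: the four members before pad then occupy 4 * SIZE_SZ
   bytes, the two chunk header words accounted for by the check above
   add 2 * SIZE_SZ more, and -6 * SIZE_SZ & MALLOC_ALIGN_MASK is exactly
   the amount needed to round that 6 * SIZE_SZ total up to the next
   multiple of MALLOC_ALIGNMENT.  */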
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

static mutex_t free_list_lock = _LIBC_LOCK_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
static mutex_t list_lock = _LIBC_LOCK_INITIALIZER;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr && !arena_is_corrupt (ptr)) \
        (void) mutex_lock (&ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
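/* Worked example for heap_for_ptr, assuming a 64-bit build where
   HEAP_MAX_SIZE is 0x4000000 (64 MiB): clearing the low 26 bits of a
   chunk address such as 0x7f3512345678 yields 0x7f3510000000, the start
   of the enclosing heap_info.  This only works because new_heap below
   always maps heaps at HEAP_MAX_SIZE-aligned addresses.  */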

/**************************************************************************/

/* atfork support.  */

static void *(*save_malloc_hook)(size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;

# ifdef ATFORK_MEM
ATFORK_MEM;
# endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

# define ATFORK_ARENA_PTR ((void *) -1)
/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void *
malloc_atfork (size_t sz, const void *caller)
{
  void *victim;

  if (thread_arena == ATFORK_ARENA_PTR)
    {
      /* We are the only thread that may allocate at all.  */
      if (save_malloc_hook != malloc_check)
        {
          return _int_malloc (&main_arena, sz);
        }
      else
        {
          if (top_check () < 0)
            return 0;

          victim = _int_malloc (&main_arena, sz + 1);
          return mem2mem_check (victim, sz);
        }
    }
  else
    {
      /* Suspend the thread until the `atfork' handlers have completed.
         By that time, the hooks will have been reset as well, so that
         mALLOc() can be used again. */
      (void) mutex_lock (&list_lock);
      (void) mutex_unlock (&list_lock);
      return __libc_malloc (sz);
    }
}
static void
free_atfork (void *mem, const void *caller)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk (mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
    {
      munmap_chunk (p);
      return;
    }

  ar_ptr = arena_for_chunk (p);
  _int_free (ar_ptr, p, thread_arena == ATFORK_ARENA_PTR);
}
/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in ptmalloc_unlock_all2.  */

  if (mutex_trylock (&list_lock))
    {
      if (thread_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter. */
        goto out;

      /* This thread has to wait its turn.  */
      (void) mutex_lock (&list_lock);
    }
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now.
     save_arena will be reattached to the current thread, in
     ptmalloc_unlock_all, so save_arena->attached_threads is not
     updated.  */
  save_arena = thread_arena;
  thread_arena = ATFORK_ARENA_PTR;
out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  if (--atfork_recursive_cntr != 0)
    return;

  /* Replace ATFORK_ARENA_PTR with save_arena.
     save_arena->attached_threads was not changed in ptmalloc_lock_all
     and is still correct.  */
  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  (void) mutex_unlock (&list_lock);
}
# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child.  */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;

  /* Push all arenas to the free list, except save_arena, which is
     attached to the current thread.  */
  mutex_init (&free_list_lock);
  if (save_arena != NULL)
    ((mstate) save_arena)->attached_threads = 1;
  free_list = NULL;
  for (ar_ptr = &main_arena;; )
    {
      mutex_init (&ar_ptr->mutex);
      if (ar_ptr != save_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  mutex_init (&list_lock);
  atfork_recursive_cntr = 0;
}

# else

#  define ptmalloc_unlock_all2 ptmalloc_unlock_all
# endif
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;
  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all, \
                      ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
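/* Rough sketch of how new_heap below obtains that alignment: it maps
   2 * HEAP_MAX_SIZE bytes of PROT_NONE address space, rounds the start
   up to the next HEAP_MAX_SIZE boundary, and unmaps the unused head and
   tail.  When the mapping happened to be aligned already, the address of
   the (now unmapped) second half is remembered here for the next call.  */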
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Shrink a heap. */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
/* Delete a heap. */

#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)
static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  mutex_init (&a->mutex);

  (void) mutex_lock (&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  (void) mutex_unlock (&list_lock);

  (void) mutex_lock (&free_list_lock);
  detach_arena (replaced_arena);
  (void) mutex_unlock (&free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with ptmalloc_lock_all.  */

  (void) mutex_lock (&a->mutex);

  return a;
}
/* Remove an arena from free_list.  The arena may be in use because it
   was attached concurrently to a thread by reused_arena below.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          ++result->attached_threads;

          detach_arena (replaced_arena);
        }
      (void) mutex_unlock (&free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void) mutex_lock (&result->mutex);
          thread_arena = result;
        }
    }

  return result;
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        break;
    }

  /* We could not find any arena that was either not corrupted or not the one
     we wanted to avoid.  */
  if (result == begin || result == avoid_arena)
    return NULL;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  /* Attach the arena to the current thread.  Note that we may have
     selected an arena which was on free_list.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    (void) mutex_lock (&free_list_lock);
    detach_arena (replaced_arena);
    ++result->attached_threads;
    (void) mutex_unlock (&free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
static mstate
internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
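              /* NARENAS_FROM_NCORES comes from malloc.c; at the time of
                 writing it is believed to allow roughly two arenas per
                 core on 32-bit and eight per core on 64-bit builds, but
                 the definition there is authoritative.  */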
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      (void) mutex_lock (&ar_ptr->mutex);
    }
  else
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      (void) mutex_unlock (&free_list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */