malloc: Remove malloc hooks from fork handler
[glibc.git] / malloc / arena.c

/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];

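/* Worked example of the pad computation above, with assumed values:
   for SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16 (a typical 64-bit
   configuration), the four members occupy 4 * SIZE_SZ == 32 bytes and
   the pad evaluates to (-6 * 8) & 15 == 0, so
   sizeof (heap_info) + 2 * SIZE_SZ == 48 is a multiple of
   MALLOC_ALIGNMENT.  For SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8, the
   members take 16 bytes, the pad is (-24) & 7 == 0, and 16 + 8 == 24
   is again a multiple of the alignment.  */
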
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

static mutex_t free_list_lock = _LIBC_LOCK_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
static mutex_t list_lock = _LIBC_LOCK_INITIALIZER;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do {             \
      ptr = thread_arena;                     \
      arena_lock (ptr, size);                 \
  } while (0)

#define arena_lock(ptr, size) do {            \
      if (ptr && !arena_is_corrupt (ptr))     \
        (void) mutex_lock (&ptr->mutex);      \
      else                                    \
        ptr = arena_get2 ((size), NULL);      \
  } while (0)

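/* A rough sketch of how __libc_malloc in malloc.c drives these macros
   (hook handling and statistics omitted):

     arena_get (ar_ptr, bytes);
     victim = _int_malloc (ar_ptr, bytes);
     if (victim == NULL && ar_ptr != NULL)
       {
         ar_ptr = arena_get_retry (ar_ptr, bytes);
         victim = _int_malloc (ar_ptr, bytes);
       }
     if (ar_ptr != NULL)
       (void) mutex_unlock (&ar_ptr->mutex);  */
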
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)

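/* For example, assuming HEAP_MAX_SIZE == 1024 * 1024 (0x100000), a
   non-main-arena chunk at address 0x7f12345678d0 belongs to the heap
   mapped at 0x7f1234500000: masking off the low 20 bits of the chunk
   address yields the heap_info at the start of that mapping, whose
   ar_ptr member points to the owning arena.  */
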
/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  (void) mutex_lock (&list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  (void) mutex_unlock (&list_lock);
}

void
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  mutex_init (&free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      mutex_init (&ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  mutex_init (&list_lock);
}

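/* These handlers are invoked directly by the fork implementation
   rather than through pthread_atfork: __malloc_fork_lock_parent runs
   in the parent just before the fork, __malloc_fork_unlock_parent
   runs in the parent afterwards, and __malloc_fork_unlock_child runs
   in the new child process, which starts out single-threaded.  */
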
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}

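/* For example, given the environment entry "MALLOC_TOP_PAD_=131072",
   next_env_entry returns a pointer to "TOP_PAD_=131072" and leaves
   *position pointing past that entry; entries that do not start with
   "MALLOC_" are skipped.  */
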
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

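/* The tunables handled above come from the environment of a
   non-secure process, for example (illustrative invocation):

     MALLOC_ARENA_MAX=2 MALLOC_MMAP_THRESHOLD_=65536 ./app

   MALLOC_CHECK_ selects the check action and, when the resulting
   action is non-zero, installs the checking hooks via
   __malloc_check_init.  */
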
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

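/* How the alignment above works: a PROT_NONE mapping of
   2 * HEAP_MAX_SIZE bytes always contains a HEAP_MAX_SIZE region that
   starts on a HEAP_MAX_SIZE boundary.  The unused head (ul bytes) and
   tail are unmapped again; when the head is empty, the tail is itself
   aligned and its address is saved in aligned_heap_area as a hint for
   the next call.  Only the first `size' bytes of the surviving
   mapping are made readable and writable; the rest stays PROT_NONE
   until grow_heap extends mprotect_size.  */
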
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap. */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap)                                               \
  do {                                                                  \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)         \
        aligned_heap_area = NULL;                                       \
      __munmap ((char *) (heap), HEAP_MAX_SIZE);                        \
    } while (0)

static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  mutex_init (&a->mutex);

  (void) mutex_lock (&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  (void) mutex_unlock (&list_lock);

  (void) mutex_lock (&free_list_lock);
  detach_arena (replaced_arena);
  (void) mutex_unlock (&free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  (void) mutex_lock (&a->mutex);

  return a;
}

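/* Layout of a heap created by _int_new_arena, from low to high
   addresses:

     heap_info            (h)
     struct malloc_state  (a == h->ar_ptr, the arena header)
     alignment gap        (at most MALLOC_ALIGNMENT - 1 bytes)
     top chunk            (the rest of the h->size bytes mapped in)

   Additional heaps attached later to the same arena contain only the
   heap_info header followed by chunks; their ar_ptr points back to
   this arena.  */
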
/* Remove an arena from free_list.  The arena may be in use because it
   was attached concurrently to a thread by reused_arena below.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          ++result->attached_threads;

          detach_arena (replaced_arena);
        }
      (void) mutex_unlock (&free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void) mutex_lock (&result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        break;
    }

  /* We could not find any arena that was either not corrupted or not the one
     we wanted to avoid.  */
  if (result == begin || result == avoid_arena)
    return NULL;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  /* Attach the arena to the current thread.  Note that we may have
     selected an arena which was on free_list.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    (void) mutex_lock (&free_list_lock);
    detach_arena (replaced_arena);
    ++result->attached_threads;
    (void) mutex_unlock (&free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

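/* Example of the arena limit, with assumed values: on a 64-bit system
   with 4 cores and neither MALLOC_ARENA_MAX nor mallopt (M_ARENA_MAX)
   in effect, narenas_limit becomes NARENAS_FROM_NCORES (4).
   NARENAS_FROM_NCORES is defined alongside the other malloc internals
   and expands to roughly 8 arenas per core on 64-bit targets (2 per
   core on 32-bit), i.e. 32 arenas here.  */
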
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      (void) mutex_lock (&ar_ptr->mutex);
    }
  else
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      (void) mutex_unlock (&free_list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */