/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
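/* For orientation only (the actual values are sysdeps-dependent, so
   treat these numbers as assumptions): with DEFAULT_MMAP_THRESHOLD_MAX
   of 512 KiB on 32-bit and 32 MiB on 64-bit configurations,
   HEAP_MAX_SIZE comes out to 1 MiB and 64 MiB respectively.  */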
/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
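/* A worked example of the pad computation (illustrative numbers, not
   from the source): on the usual configuration where pointers and
   INTERNAL_SIZE_T have the same size, the four members above occupy
   4 * SIZE_SZ bytes, so -6 * SIZE_SZ & MALLOC_ALIGN_MASK yields
   exactly the number of bytes needed to make
   sizeof (heap_info) + 2 * SIZE_SZ, i.e. 6 * SIZE_SZ + pad, a
   multiple of MALLOC_ALIGNMENT.  With SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16, -48 & 15 == 0, so no padding; with
   SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16, -24 & 15 == 8.  */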
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

static mutex_t free_list_lock = _LIBC_LOCK_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;
/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
static mutex_t list_lock = _LIBC_LOCK_INITIALIZER;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do {	  \
      ptr = thread_arena;		  \
      arena_lock (ptr, size);		  \
  } while (0)

#define arena_lock(ptr, size) do {		  \
      if (ptr && !arena_is_corrupt (ptr))	  \
        (void) mutex_lock (&ptr->mutex);	  \
      else					  \
        ptr = arena_get2 ((size), NULL);	  \
  } while (0)
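/* A sketch of the intended call pattern, paraphrased from
   __libc_malloc in malloc.c (shown here only for illustration; see
   malloc.c for the authoritative code):

     mstate ar_ptr;
     void *victim;
     arena_get (ar_ptr, bytes);            // lock this thread's arena
     victim = _int_malloc (ar_ptr, bytes); // allocate from it
     if (ar_ptr != NULL)
       (void) mutex_unlock (&ar_ptr->mutex);
*/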
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
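/* Illustration (assumed numbers): because every heap starts at an
   address aligned to HEAP_MAX_SIZE, masking off the low bits of any
   chunk address inside it recovers the heap_info header.  E.g. with
   HEAP_MAX_SIZE == 0x4000000, a chunk at 0x7f5a45123450 masks to
   0x7f5a44000000, the start of its heap.  */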
/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
internal_function
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  (void) mutex_lock (&list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}
void
internal_function
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  (void) mutex_unlock (&list_lock);
}
void
internal_function
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  mutex_init (&free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      mutex_init (&ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  mutex_init (&list_lock);
}
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
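/* For example (illustrative): given an environment entry
   "MALLOC_TOP_PAD_=1024", next_env_entry returns a pointer to
   "TOP_PAD_=1024" and advances *position past that entry, so repeated
   calls enumerate all MALLOC_-prefixed variables in order.  */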
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}
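/* Usage note (a hedged example, not taken from this file): the
   variables parsed above are set in the environment before program
   start, e.g.

     MALLOC_ARENA_MAX=2 MALLOC_MMAP_THRESHOLD_=65536 ./app

   Most of them are ignored for setuid/setgid programs because of the
   __libc_enable_secure checks above.  */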
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */
static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
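/* How the alignment is obtained (a worked example with assumed
   numbers): mapping 2 * HEAP_MAX_SIZE bytes guarantees that an
   aligned region of HEAP_MAX_SIZE bytes fits inside.  If HEAP_MAX_SIZE
   is 0x4000000 and mmap returns p1 = 0x7f5a42345000, then p2 is
   rounded up to 0x7f5a44000000; the leading ul = p2 - p1 bytes and
   the trailing HEAP_MAX_SIZE - ul bytes are unmapped, leaving exactly
   HEAP_MAX_SIZE aligned bytes.  */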
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
/* Delete a heap. */

#define delete_heap(heap) \
  do {								\
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
        aligned_heap_area = NULL;				\
      __munmap ((char *) (heap), HEAP_MAX_SIZE);		\
    } while (0)
static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink.  */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success.  Adjust top accordingly.  */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
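/* Worked example of the trim computation (assumed numbers): with a
   4 KiB page size, MINSIZE == 0x20, top_size == 0x42000 and
   pad == 0x1000, the code computes top_area == 0x41fdf and
   extra == ALIGN_DOWN (0x40fdf, 0x1000) == 0x40000, so 256 KiB are
   handed back via shrink_heap while the remaining top chunk still
   holds more than pad + MINSIZE bytes.  */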
/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  mutex_init (&a->mutex);

  (void) mutex_lock (&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  (void) mutex_unlock (&list_lock);

  (void) mutex_lock (&free_list_lock);
  detach_arena (replaced_arena);
  (void) mutex_unlock (&free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  (void) mutex_lock (&a->mutex);

  return a;
}
/* Remove an arena from free_list.  The arena may be in use because it
   was attached concurrently to a thread by reused_arena below.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          ++result->attached_threads;

          detach_arena (replaced_arena);
        }
      (void) mutex_unlock (&free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void) mutex_lock (&result->mutex);
          thread_arena = result;
        }
    }

  return result;
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        /* We looped around the arena list.  We could not find any
           arena that was either not corrupted or not the one we
           wanted to avoid.  */
        return NULL;
    }

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  /* Attach the arena to the current thread.  Note that we may have
     selected an arena which was on free_list.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    (void) mutex_lock (&free_list_lock);
    detach_arena (replaced_arena);
    ++result->attached_threads;
    (void) mutex_unlock (&free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
static mstate
internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
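/* Note on the default arena limit (an assumption based on the
   NARENAS_FROM_NCORES definition in malloc.c, mentioned here only for
   orientation): the cap works out to roughly 2 * ncores on 32-bit and
   8 * ncores on 64-bit targets, unless overridden via
   mallopt (M_ARENA_MAX, ...) or the MALLOC_ARENA_MAX environment
   variable handled in ptmalloc_init above.  */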
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      (void) mutex_lock (&ar_ptr->mutex);
    }
  else
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      (void) mutex_unlock (&free_list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
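/* text_set_element places arena_thread_freeres into the
   __libc_thread_subfreeres set; those hooks are run via
   __libc_thread_freeres when a thread exits, which is how a dying
   thread's arena finds its way back onto free_list for reuse by new
   threads.  */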
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */