malloc/arena.c (blob 1edb4d4d35041b552975178c374e728ce7429ef7)
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif
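
/* Illustrative note (not part of the original source): assuming
   DEFAULT_MMAP_THRESHOLD_MAX in malloc.c is 4 * 1024 * 1024 * sizeof (long),
   the default works out to

     HEAP_MAX_SIZE = 2 * 4 * 1024 * 1024 * 8 = 0x4000000  (64 MiB, 64-bit)
     HEAP_MAX_SIZE = 2 * 4 * 1024 * 1024 * 4 = 0x2000000  (32 MiB, 32-bit)

   both of which are powers of two, as required below.  */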

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
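
/* Worked example (illustrative, not part of the original source): assuming
   SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16 (so MALLOC_ALIGN_MASK == 15),
   the four members above occupy 16 bytes and

     pad size = -6 * SIZE_SZ & MALLOC_ALIGN_MASK = -24 & 15 = 8,

   giving sizeof (heap_info) == 24, so sizeof (heap_info) + 2 * SIZE_SZ == 32
   is a multiple of MALLOC_ALIGNMENT.  The array above then gets the legal
   size 1 and the sanity check compiles.  */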

/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

static mutex_t free_list_lock = _LIBC_LOCK_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
static mutex_t list_lock = _LIBC_LOCK_INITIALIZER;

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr && !arena_is_corrupt (ptr)) \
        (void) mutex_lock (&ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
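
/* A sketch of how a caller in malloc.c typically uses these macros
   (illustrative and simplified; the exact code lives in __libc_malloc
   and friends, not in this file):

     mstate ar_ptr;
     void *victim;

     arena_get (ar_ptr, bytes);
     victim = _int_malloc (ar_ptr, bytes);
     if (victim == NULL && ar_ptr != NULL)
       {
         ar_ptr = arena_get_retry (ar_ptr, bytes);
         victim = _int_malloc (ar_ptr, bytes);
       }
     if (ar_ptr != NULL)
       (void) mutex_unlock (&ar_ptr->mutex);  */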

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
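
/* Worked example (illustrative, not part of the original source): assuming
   HEAP_MAX_SIZE == 0x4000000 (64 MiB), a chunk at address 0x7f3ec5ae3010
   lies in the heap whose header is at

     0x7f3ec5ae3010 & ~(0x4000000 - 1) == 0x7f3ec4000000,

   i.e. heap_for_ptr() simply clears the low 26 address bits.  This only
   works because every non-main heap is mmap()ed at a HEAP_MAX_SIZE-aligned
   address and is at most HEAP_MAX_SIZE bytes long.  */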

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static void *(*save_malloc_hook)(size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;

# ifdef ATFORK_MEM
ATFORK_MEM;
# endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

# define ATFORK_ARENA_PTR ((void *) -1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void *
malloc_atfork (size_t sz, const void *caller)
{
  void *victim;

  if (thread_arena == ATFORK_ARENA_PTR)
    {
      /* We are the only thread that may allocate at all.  */
      if (save_malloc_hook != malloc_check)
        {
          return _int_malloc (&main_arena, sz);
        }
      else
        {
          if (top_check () < 0)
            return 0;

          victim = _int_malloc (&main_arena, sz + 1);
          return mem2mem_check (victim, sz);
        }
    }
  else
    {
      /* Suspend the thread until the `atfork' handlers have completed.
         By that time, the hooks will have been reset as well, so that
         mALLOc() can be used again. */
      (void) mutex_lock (&list_lock);
      (void) mutex_unlock (&list_lock);
      return __libc_malloc (sz);
    }
}

static void
free_atfork (void *mem, const void *caller)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk (mem);     /* do not bother to replicate free_check here */

  if (chunk_is_mmapped (p))             /* release mmapped memory. */
    {
      munmap_chunk (p);
      return;
    }

  ar_ptr = arena_for_chunk (p);
  _int_free (ar_ptr, p, thread_arena == ATFORK_ARENA_PTR);
}

/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in ptmalloc_unlock_all2.  */

  if (mutex_trylock (&list_lock))
    {
      if (thread_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter. */
        goto out;

      /* This thread has to wait its turn.  */
      (void) mutex_lock (&list_lock);
    }
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now.
     save_arena will be reattached to the current thread, in
     ptmalloc_lock_all, so save_arena->attached_threads is not
     updated.  */
  save_arena = thread_arena;
  thread_arena = ATFORK_ARENA_PTR;
out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  if (--atfork_recursive_cntr != 0)
    return;

  /* Replace ATFORK_ARENA_PTR with save_arena.
     save_arena->attached_threads was not changed in ptmalloc_lock_all
     and is still correct.  */
  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  (void) mutex_unlock (&list_lock);
}

# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;

  /* Push all arenas to the free list, except save_arena, which is
     attached to the current thread.  */
  mutex_init (&free_list_lock);
  if (save_arena != NULL)
    ((mstate) save_arena)->attached_threads = 1;
  free_list = NULL;
  for (ar_ptr = &main_arena;; )
    {
      mutex_init (&ar_ptr->mutex);
      if (ar_ptr != save_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  mutex_init (&list_lock);
  atfork_recursive_cntr = 0;
}

# else

#  define ptmalloc_unlock_all2 ptmalloc_unlock_all
# endif
#endif  /* !NO_THREADS */

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}

#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;
  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all, \
                      ptmalloc_unlock_all2)
#endif

/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
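
/* Illustrative example (not part of the original source), assuming
   HEAP_MAX_SIZE == 0x4000000: if the MMAP (0, HEAP_MAX_SIZE << 1, ...)
   in new_heap below returns p1 == 0x7f3ec2d34000, then

     p2 = (p1 + 0x3ffffff) & ~0x3ffffff = 0x7f3ec4000000,

   and the unaligned head [p1, p2) plus the tail beyond p2 + HEAP_MAX_SIZE
   are unmapped again, leaving exactly the aligned region
   [p2, p2 + HEAP_MAX_SIZE).  Only when p1 happens to be aligned already
   (ul == 0) is p2 + HEAP_MAX_SIZE recorded in aligned_heap_area as a hint
   for the next call.  */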

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)

static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      arena_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink.  */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  mutex_init (&a->mutex);

  (void) mutex_lock (&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  (void) mutex_unlock (&list_lock);

  (void) mutex_lock (&free_list_lock);
  detach_arena (replaced_arena);
  (void) mutex_unlock (&free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with ptmalloc_lock_all.  */

  (void) mutex_lock (&a->mutex);

  return a;
}

/* Remove an arena from free_list.  The arena may be in use because it
   was attached concurrently to a thread by reused_arena below.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          ++result->attached_threads;

          detach_arena (replaced_arena);
        }
      (void) mutex_unlock (&free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void) mutex_lock (&result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        break;
    }

  /* We could not find any arena that was either not corrupted or not the one
     we wanted to avoid.  */
  if (result == begin || result == avoid_arena)
    return NULL;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  /* Attach the arena to the current thread.  Note that we may have
     selected an arena which was on free_list.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    (void) mutex_lock (&free_list_lock);
    detach_arena (replaced_arena);
    ++result->attached_threads;
    (void) mutex_unlock (&free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
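      /* Illustrative example (not part of the original source): while
         narenas_limit == 0 (arena_max unset and narenas still at or below
         arena_test), narenas_limit - 1 wraps to SIZE_MAX, so the test below
         always succeeds and a new arena is created.  Once narenas_limit is
         set to, say, 8, creation stops as soon as narenas reaches 8 and
         reused_arena is used instead.  */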
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      (void) mutex_lock (&ar_ptr->mutex);
    }
  else
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      (void) mutex_unlock (&free_list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */