malloc/arena.c
1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 2001-2015 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If
18 not, see <http://www.gnu.org/licenses/>. */
20 #include <stdbool.h>
22 /* Compile-time constants. */
24 #define HEAP_MIN_SIZE (32 * 1024)
25 #ifndef HEAP_MAX_SIZE
26 # ifdef DEFAULT_MMAP_THRESHOLD_MAX
27 # define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
28 # else
29 # define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
30 # endif
31 #endif
33 /* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
34 that are dynamically created for multi-threaded programs. The
35 maximum size must be a power of two, for fast determination of
36 which heap belongs to a chunk. It should be much larger than the
37 mmap threshold, so that requests with a size just below that
38 threshold can be fulfilled without creating too many heaps. */
40 /***************************************************************************/
42 #define top(ar_ptr) ((ar_ptr)->top)
44 /* A heap is a single contiguous memory region holding (coalesceable)
45 malloc_chunks. It is allocated with mmap() and always starts at an
46 address aligned to HEAP_MAX_SIZE. */
48 typedef struct _heap_info
50 mstate ar_ptr; /* Arena for this heap. */
51 struct _heap_info *prev; /* Previous heap. */
52 size_t size; /* Current size in bytes. */
53 size_t mprotect_size; /* Size in bytes that has been mprotected
54 PROT_READ|PROT_WRITE. */
55 /* Make sure the following data is properly aligned, particularly
56 that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
57 MALLOC_ALIGNMENT. */
58 char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
59 } heap_info;
61 /* Get a compile-time error if the heap_info padding is not correct
62 to make alignment work as expected in sYSMALLOc. */
63 extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
64 + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
65 ? -1 : 1];
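/* Illustrative sketch (not part of the original file): the pad formula
   above, replayed with hypothetical values for a 32-bit platform that
   nevertheless uses 16-byte MALLOC_ALIGNMENT, so the padding is non-zero.
   The four pointer/size_t fields of heap_info occupy 4 * SIZE_SZ bytes;
   adding pad plus 2 * SIZE_SZ must land on a multiple of MALLOC_ALIGNMENT.  */
#include <assert.h>
#include <stddef.h>

int
main (void)
{
  size_t size_sz = 4;                      /* hypothetical SIZE_SZ (ILP32) */
  size_t align = 16;                       /* hypothetical MALLOC_ALIGNMENT */
  size_t align_mask = align - 1;           /* MALLOC_ALIGN_MASK */
  size_t fields = 4 * size_sz;             /* ar_ptr, prev, size, mprotect_size */
  size_t pad = -6 * size_sz & align_mask;  /* same expression as in the struct */
  assert (pad == 8);                       /* 6 * 4 = 24; next multiple of 16 is 32 */
  assert ((fields + pad + 2 * size_sz) % align == 0);
  return 0;
}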
67 /* Thread specific data */
69 static tsd_key_t arena_key;
70 static mutex_t list_lock = MUTEX_INITIALIZER;
71 static size_t narenas = 1;
72 static mstate free_list;
74 /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
75 static unsigned long arena_mem;
77 /* Already initialized? */
78 int __malloc_initialized = -1;
80 /**************************************************************************/
83 /* arena_get() acquires an arena and locks the corresponding mutex.
84 First, try the one last locked successfully by this thread. (This
85 is the common case and handled with a macro for speed.) Then, loop
86 once over the circularly linked list of arenas. If no arena is
87 readily available, create a new one. In this latter case, `size'
88 is just a hint as to how much memory will be required immediately
89 in the new arena. */
91 #define arena_get(ptr, size) do { \
92 arena_lookup (ptr); \
93 arena_lock (ptr, size); \
94 } while (0)
96 #define arena_lookup(ptr) do { \
97 void *vptr = NULL; \
98 ptr = (mstate) tsd_getspecific (arena_key, vptr); \
99 } while (0)
101 #define arena_lock(ptr, size) do { \
102 if (ptr && !arena_is_corrupt (ptr)) \
103 (void) mutex_lock (&ptr->mutex); \
104 else \
105 ptr = arena_get2 (ptr, (size), NULL); \
106 } while (0)
108 /* find the heap and corresponding arena for a given ptr */
110 #define heap_for_ptr(ptr) \
111 ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
112 #define arena_for_chunk(ptr) \
113 (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
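/* Illustrative sketch (not part of the original file): because every
   non-main heap starts at an address aligned to HEAP_MAX_SIZE (a power of
   two), clearing the low bits of any chunk address inside the heap recovers
   the heap_info at its start, which is all heap_for_ptr does.  Pure address
   arithmetic on a made-up, suitably aligned base; nothing is dereferenced.  */
#include <assert.h>
#include <stdint.h>

#define DEMO_HEAP_MAX_SIZE ((uintptr_t) 1024 * 1024)   /* hypothetical, 1 MiB */

int
main (void)
{
  uintptr_t heap_base = 64 * DEMO_HEAP_MAX_SIZE;       /* aligned heap start */
  uintptr_t some_chunk = heap_base + 0x4242;           /* pointer inside the heap */
  uintptr_t recovered = some_chunk & ~(DEMO_HEAP_MAX_SIZE - 1);
  assert (recovered == heap_base);
  return 0;
}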
116 /**************************************************************************/
118 #ifndef NO_THREADS
120 /* atfork support. */
122 static void *(*save_malloc_hook)(size_t __size, const void *);
123 static void (*save_free_hook) (void *__ptr, const void *);
124 static void *save_arena;
126 # ifdef ATFORK_MEM
127 ATFORK_MEM;
128 # endif
130 /* Magic value for the thread-specific arena pointer when
131 malloc_atfork() is in use. */
133 # define ATFORK_ARENA_PTR ((void *) -1)
135 /* The following hooks are used while the `atfork' handling mechanism
136 is active. */
138 static void *
139 malloc_atfork (size_t sz, const void *caller)
141 void *vptr = NULL;
142 void *victim;
144 tsd_getspecific (arena_key, vptr);
145 if (vptr == ATFORK_ARENA_PTR)
147 /* We are the only thread that may allocate at all. */
148 if (save_malloc_hook != malloc_check)
150 return _int_malloc (&main_arena, sz);
152 else
154 if (top_check () < 0)
155 return 0;
157 victim = _int_malloc (&main_arena, sz + 1);
158 return mem2mem_check (victim, sz);
161 else
163 /* Suspend the thread until the `atfork' handlers have completed.
164 By that time, the hooks will have been reset as well, so that
165 mALLOc() can be used again. */
166 (void) mutex_lock (&list_lock);
167 (void) mutex_unlock (&list_lock);
168 return __libc_malloc (sz);
172 static void
173 free_atfork (void *mem, const void *caller)
175 void *vptr = NULL;
176 mstate ar_ptr;
177 mchunkptr p; /* chunk corresponding to mem */
179 if (mem == 0) /* free(0) has no effect */
180 return;
182 p = mem2chunk (mem); /* do not bother to replicate free_check here */
184 if (chunk_is_mmapped (p)) /* release mmapped memory. */
186 munmap_chunk (p);
187 return;
190 ar_ptr = arena_for_chunk (p);
191 tsd_getspecific (arena_key, vptr);
192 _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
196 /* Counter for number of times the list is locked by the same thread. */
197 static unsigned int atfork_recursive_cntr;
199 /* The following two functions are registered via thread_atfork() to
200 make sure that the mutexes remain in a consistent state in the
201 fork()ed version of a thread. Also adapt the malloc and free hooks
202 temporarily, because the `atfork' handler mechanism may use
203 malloc/free internally (e.g. in LinuxThreads). */
205 static void
206 ptmalloc_lock_all (void)
208 mstate ar_ptr;
210 if (__malloc_initialized < 1)
211 return;
213 if (mutex_trylock (&list_lock))
215 void *my_arena;
216 tsd_getspecific (arena_key, my_arena);
217 if (my_arena == ATFORK_ARENA_PTR)
218 /* This is the same thread which already locks the global list.
219 Just bump the counter. */
220 goto out;
222 /* This thread has to wait its turn. */
223 (void) mutex_lock (&list_lock);
225 for (ar_ptr = &main_arena;; )
227 (void) mutex_lock (&ar_ptr->mutex);
228 ar_ptr = ar_ptr->next;
229 if (ar_ptr == &main_arena)
230 break;
232 save_malloc_hook = __malloc_hook;
233 save_free_hook = __free_hook;
234 __malloc_hook = malloc_atfork;
235 __free_hook = free_atfork;
236 /* Only the current thread may perform malloc/free calls now. */
237 tsd_getspecific (arena_key, save_arena);
238 tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
239 out:
240 ++atfork_recursive_cntr;
243 static void
244 ptmalloc_unlock_all (void)
246 mstate ar_ptr;
248 if (__malloc_initialized < 1)
249 return;
251 if (--atfork_recursive_cntr != 0)
252 return;
254 tsd_setspecific (arena_key, save_arena);
255 __malloc_hook = save_malloc_hook;
256 __free_hook = save_free_hook;
257 for (ar_ptr = &main_arena;; )
259 (void) mutex_unlock (&ar_ptr->mutex);
260 ar_ptr = ar_ptr->next;
261 if (ar_ptr == &main_arena)
262 break;
264 (void) mutex_unlock (&list_lock);
267 # ifdef __linux__
269 /* In NPTL, unlocking a mutex in the child process after a
270 fork() is currently unsafe, whereas re-initializing it is safe and
271 does not leak resources. Therefore, a special atfork handler is
272 installed for the child. */
274 static void
275 ptmalloc_unlock_all2 (void)
277 mstate ar_ptr;
279 if (__malloc_initialized < 1)
280 return;
282 tsd_setspecific (arena_key, save_arena);
283 __malloc_hook = save_malloc_hook;
284 __free_hook = save_free_hook;
285 free_list = NULL;
286 for (ar_ptr = &main_arena;; )
288 mutex_init (&ar_ptr->mutex);
289 if (ar_ptr != save_arena)
291 ar_ptr->next_free = free_list;
292 free_list = ar_ptr;
294 ar_ptr = ar_ptr->next;
295 if (ar_ptr == &main_arena)
296 break;
298 mutex_init (&list_lock);
299 atfork_recursive_cntr = 0;
302 # else
304 # define ptmalloc_unlock_all2 ptmalloc_unlock_all
305 # endif
306 #endif /* !NO_THREADS */
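/* Illustrative sketch (not part of the original file): the prepare/parent/
   child discipline the handlers above implement for the arena mutexes,
   shown for a single pthread mutex.  The prepare handler locks, the parent
   unlocks, and the child re-initializes instead of unlocking
   (cf. ptmalloc_unlock_all2).  Hypothetical demo names throughout.  */
#include <pthread.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_prepare (void) { pthread_mutex_lock (&demo_lock); }
static void demo_parent (void) { pthread_mutex_unlock (&demo_lock); }
static void
demo_child (void)
{
  /* In the child, re-initialize rather than unlock (see the NPTL note
     before ptmalloc_unlock_all2 above).  */
  pthread_mutex_init (&demo_lock, NULL);
}

int
main (void)
{
  pthread_atfork (demo_prepare, demo_parent, demo_child);
  pid_t pid = fork ();
  if (pid == 0)
    {
      /* Usable in the child because it was re-initialized, not unlocked.  */
      pthread_mutex_lock (&demo_lock);
      pthread_mutex_unlock (&demo_lock);
      _exit (0);
    }
  if (pid > 0)
    waitpid (pid, NULL, 0);
  return 0;
}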
308 /* Initialization routine. */
309 #include <string.h>
310 extern char **_environ;
312 static char *
313 internal_function
314 next_env_entry (char ***position)
316 char **current = *position;
317 char *result = NULL;
319 while (*current != NULL)
321 if (__builtin_expect ((*current)[0] == 'M', 0)
322 && (*current)[1] == 'A'
323 && (*current)[2] == 'L'
324 && (*current)[3] == 'L'
325 && (*current)[4] == 'O'
326 && (*current)[5] == 'C'
327 && (*current)[6] == '_')
329 result = &(*current)[7];
331 /* Save current position for next visit. */
332 *position = ++current;
334 break;
337 ++current;
340 return result;
344 #ifdef SHARED
345 static void *
346 __failing_morecore (ptrdiff_t d)
348 return (void *) MORECORE_FAILURE;
351 extern struct dl_open_hook *_dl_open_hook;
352 libc_hidden_proto (_dl_open_hook);
353 #endif
355 static void
356 ptmalloc_init (void)
358 if (__malloc_initialized >= 0)
359 return;
361 __malloc_initialized = 0;
363 #ifdef SHARED
364 /* In case this libc copy is in a non-default namespace, never use brk.
365 Likewise if dlopened from statically linked program. */
366 Dl_info di;
367 struct link_map *l;
369 if (_dl_open_hook != NULL
370 || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
371 && l->l_ns != LM_ID_BASE))
372 __morecore = __failing_morecore;
373 #endif
375 tsd_key_create (&arena_key, NULL);
376 tsd_setspecific (arena_key, (void *) &main_arena);
377 thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
378 const char *s = NULL;
379 if (__glibc_likely (_environ != NULL))
381 char **runp = _environ;
382 char *envline;
 384       while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
 385                                0))
387 size_t len = strcspn (envline, "=");
389 if (envline[len] != '=')
390 /* This is a "MALLOC_" variable at the end of the string
391 without a '=' character. Ignore it since otherwise we
392 will access invalid memory below. */
393 continue;
395 switch (len)
397 case 6:
398 if (memcmp (envline, "CHECK_", 6) == 0)
399 s = &envline[7];
400 break;
401 case 8:
402 if (!__builtin_expect (__libc_enable_secure, 0))
404 if (memcmp (envline, "TOP_PAD_", 8) == 0)
405 __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
406 else if (memcmp (envline, "PERTURB_", 8) == 0)
407 __libc_mallopt (M_PERTURB, atoi (&envline[9]));
409 break;
410 case 9:
411 if (!__builtin_expect (__libc_enable_secure, 0))
413 if (memcmp (envline, "MMAP_MAX_", 9) == 0)
414 __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
415 else if (memcmp (envline, "ARENA_MAX", 9) == 0)
416 __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
418 break;
419 case 10:
420 if (!__builtin_expect (__libc_enable_secure, 0))
422 if (memcmp (envline, "ARENA_TEST", 10) == 0)
423 __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
425 break;
426 case 15:
427 if (!__builtin_expect (__libc_enable_secure, 0))
429 if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
430 __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
431 else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
432 __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
434 break;
435 default:
436 break;
440 if (s && s[0])
442 __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
443 if (check_action != 0)
444 __malloc_check_init ();
446 void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
447 if (hook != NULL)
448 (*hook)();
449 __malloc_initialized = 1;
452 /* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
453 #ifdef thread_atfork_static
454 thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all, \
455 ptmalloc_unlock_all2)
456 #endif
460 /* Managing heaps and arenas (for concurrent threads) */
462 #if MALLOC_DEBUG > 1
464 /* Print the complete contents of a single heap to stderr. */
466 static void
467 dump_heap (heap_info *heap)
469 char *ptr;
470 mchunkptr p;
472 fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
473 ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
474 (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
475 p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
476 ~MALLOC_ALIGN_MASK);
477 for (;; )
479 fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
480 if (p == top (heap->ar_ptr))
482 fprintf (stderr, " (top)\n");
483 break;
485 else if (p->size == (0 | PREV_INUSE))
487 fprintf (stderr, " (fence)\n");
488 break;
490 fprintf (stderr, "\n");
491 p = next_chunk (p);
494 #endif /* MALLOC_DEBUG > 1 */
496 /* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
497 addresses as opposed to increasing, new_heap would badly fragment the
498 address space. In that case remember the second HEAP_MAX_SIZE part
499 aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
500 call (if it is already aligned) and try to reuse it next time. We need
 501    no locking for it, as the kernel ensures atomicity for us - worst case
502 we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
503 multiple threads, but only one will succeed. */
504 static char *aligned_heap_area;
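/* Illustrative sketch (not part of the original file): the over-allocate-
   and-trim trick new_heap uses below, distilled.  Map twice the desired
   alignment with PROT_NONE, keep the aligned middle, and return the excess
   on both sides.  Uses a hypothetical 1 MiB alignment, minimal error
   handling, and omits the aligned_heap_area caching of the spare half.  */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#define DEMO_ALIGN ((size_t) 1024 * 1024)        /* hypothetical HEAP_MAX_SIZE */

static void *
map_aligned_demo (void)
{
  char *p1 = mmap (NULL, DEMO_ALIGN << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;
  char *p2 = (char *) (((uintptr_t) p1 + (DEMO_ALIGN - 1))
                       & ~((uintptr_t) DEMO_ALIGN - 1));
  size_t front = (size_t) (p2 - p1);
  if (front != 0)
    munmap (p1, front);                          /* drop the unaligned head */
  munmap (p2 + DEMO_ALIGN, DEMO_ALIGN - front);  /* drop the tail */
  return p2;
}

int
main (void)
{
  void *heap = map_aligned_demo ();
  assert (heap != NULL && ((uintptr_t) heap & (DEMO_ALIGN - 1)) == 0);
  munmap (heap, DEMO_ALIGN);
  return 0;
}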
506 /* Create a new heap. size is automatically rounded up to a multiple
507 of the page size. */
509 static heap_info *
510 internal_function
511 new_heap (size_t size, size_t top_pad)
513 size_t pagesize = GLRO (dl_pagesize);
514 char *p1, *p2;
515 unsigned long ul;
516 heap_info *h;
518 if (size + top_pad < HEAP_MIN_SIZE)
519 size = HEAP_MIN_SIZE;
520 else if (size + top_pad <= HEAP_MAX_SIZE)
521 size += top_pad;
522 else if (size > HEAP_MAX_SIZE)
523 return 0;
524 else
525 size = HEAP_MAX_SIZE;
526 size = ALIGN_UP (size, pagesize);
528 /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
529 No swap space needs to be reserved for the following large
530 mapping (on Linux, this is the case for all non-writable mappings
531 anyway). */
532 p2 = MAP_FAILED;
533 if (aligned_heap_area)
535 p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
536 MAP_NORESERVE);
537 aligned_heap_area = NULL;
538 if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
540 __munmap (p2, HEAP_MAX_SIZE);
541 p2 = MAP_FAILED;
544 if (p2 == MAP_FAILED)
546 p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
547 if (p1 != MAP_FAILED)
549 p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
550 & ~(HEAP_MAX_SIZE - 1));
551 ul = p2 - p1;
552 if (ul)
553 __munmap (p1, ul);
554 else
555 aligned_heap_area = p2 + HEAP_MAX_SIZE;
556 __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
558 else
560 /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
561 is already aligned. */
562 p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
563 if (p2 == MAP_FAILED)
564 return 0;
566 if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
568 __munmap (p2, HEAP_MAX_SIZE);
569 return 0;
573 if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
575 __munmap (p2, HEAP_MAX_SIZE);
576 return 0;
578 h = (heap_info *) p2;
579 h->size = size;
580 h->mprotect_size = size;
581 LIBC_PROBE (memory_heap_new, 2, h, h->size);
582 return h;
585 /* Grow a heap. size is automatically rounded up to a
586 multiple of the page size. */
588 static int
589 grow_heap (heap_info *h, long diff)
591 size_t pagesize = GLRO (dl_pagesize);
592 long new_size;
594 diff = ALIGN_UP (diff, pagesize);
595 new_size = (long) h->size + diff;
596 if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
597 return -1;
599 if ((unsigned long) new_size > h->mprotect_size)
601 if (__mprotect ((char *) h + h->mprotect_size,
602 (unsigned long) new_size - h->mprotect_size,
603 PROT_READ | PROT_WRITE) != 0)
604 return -2;
606 h->mprotect_size = new_size;
609 h->size = new_size;
610 LIBC_PROBE (memory_heap_more, 2, h, h->size);
611 return 0;
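/* Illustrative sketch (not part of the original file): the reserve-then-
   commit pattern new_heap and grow_heap implement above.  Address space is
   reserved with PROT_NONE (inaccessible, no swap commitment on Linux) and
   pages are made usable later with mprotect, mirroring how h->mprotect_size
   trails behind h->size.  Hypothetical sizes; minimal error handling.  */
#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);
  size_t reserve = 16 * pagesize;        /* plays the role of HEAP_MAX_SIZE */
  size_t committed = 2 * pagesize;       /* plays the role of h->mprotect_size */

  char *heap = mmap (NULL, reserve, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  assert (heap != MAP_FAILED);

  /* Commit the first pages, then "grow" by committing two more, as
     grow_heap does when new_size exceeds mprotect_size.  */
  if (mprotect (heap, committed, PROT_READ | PROT_WRITE) != 0)
    return 1;
  memset (heap, 0xab, committed);        /* now legal to touch */

  size_t grown = committed + 2 * pagesize;
  if (mprotect (heap + committed, grown - committed,
                PROT_READ | PROT_WRITE) != 0)
    return 1;
  memset (heap + committed, 0xcd, grown - committed);

  munmap (heap, reserve);
  return 0;
}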
614 /* Shrink a heap. */
616 static int
617 shrink_heap (heap_info *h, long diff)
619 long new_size;
621 new_size = (long) h->size - diff;
622 if (new_size < (long) sizeof (*h))
623 return -1;
625 /* Try to re-map the extra heap space freshly to save memory, and make it
626 inaccessible. See malloc-sysdep.h to know when this is true. */
627 if (__glibc_unlikely (check_may_shrink_heap ()))
629 if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
630 MAP_FIXED) == (char *) MAP_FAILED)
631 return -2;
633 h->mprotect_size = new_size;
635 else
636 __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
637 /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
639 h->size = new_size;
640 LIBC_PROBE (memory_heap_less, 2, h, h->size);
641 return 0;
644 /* Delete a heap. */
646 #define delete_heap(heap) \
647 do { \
648 if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
649 aligned_heap_area = NULL; \
650 __munmap ((char *) (heap), HEAP_MAX_SIZE); \
651 } while (0)
653 static int
654 internal_function
655 heap_trim (heap_info *heap, size_t pad)
657 mstate ar_ptr = heap->ar_ptr;
658 unsigned long pagesz = GLRO (dl_pagesize);
659 mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
660 heap_info *prev_heap;
661 long new_size, top_size, top_area, extra, prev_size, misalign;
663 /* Can this heap go away completely? */
664 while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
666 prev_heap = heap->prev;
667 prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
668 p = chunk_at_offset (prev_heap, prev_size);
669 /* fencepost must be properly aligned. */
670 misalign = ((long) p) & MALLOC_ALIGN_MASK;
671 p = chunk_at_offset (prev_heap, prev_size - misalign);
672 assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
673 p = prev_chunk (p);
674 new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
675 assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
676 if (!prev_inuse (p))
677 new_size += p->prev_size;
678 assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
679 if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
680 break;
681 ar_ptr->system_mem -= heap->size;
682 arena_mem -= heap->size;
683 LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
684 delete_heap (heap);
685 heap = prev_heap;
686 if (!prev_inuse (p)) /* consolidate backward */
688 p = prev_chunk (p);
689 unlink (ar_ptr, p, bck, fwd);
691 assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
692 assert (((char *) p + new_size) == ((char *) heap + heap->size));
693 top (ar_ptr) = top_chunk = p;
694 set_head (top_chunk, new_size | PREV_INUSE);
695 /*check_chunk(ar_ptr, top_chunk);*/
698 /* Uses similar logic for per-thread arenas as the main arena with systrim
699 by preserving the top pad and at least a page. */
700 top_size = chunksize (top_chunk);
701 top_area = top_size - MINSIZE - 1;
702 if (top_area < 0 || (size_t) top_area <= pad)
703 return 0;
705 extra = ALIGN_DOWN(top_area - pad, pagesz);
706 if ((unsigned long) extra < mp_.trim_threshold)
707 return 0;
709 /* Try to shrink. */
710 if (shrink_heap (heap, extra) != 0)
711 return 0;
713 ar_ptr->system_mem -= extra;
714 arena_mem -= extra;
716 /* Success. Adjust top accordingly. */
717 set_head (top_chunk, (top_size - extra) | PREV_INUSE);
718 /*check_chunk(ar_ptr, top_chunk);*/
719 return 1;
722 /* Create a new arena with initial size "size". */
724 static mstate
725 _int_new_arena (size_t size)
727 mstate a;
728 heap_info *h;
729 char *ptr;
730 unsigned long misalign;
732 h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
733 mp_.top_pad);
734 if (!h)
736 /* Maybe size is too large to fit in a single heap. So, just try
737 to create a minimally-sized arena and let _int_malloc() attempt
738 to deal with the large request via mmap_chunk(). */
739 h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
740 if (!h)
741 return 0;
743 a = h->ar_ptr = (mstate) (h + 1);
744 malloc_init_state (a);
745 /*a->next = NULL;*/
746 a->system_mem = a->max_system_mem = h->size;
747 arena_mem += h->size;
749 /* Set up the top chunk, with proper alignment. */
750 ptr = (char *) (a + 1);
751 misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
752 if (misalign > 0)
753 ptr += MALLOC_ALIGNMENT - misalign;
754 top (a) = (mchunkptr) ptr;
755 set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
757 LIBC_PROBE (memory_arena_new, 2, a, size);
758 tsd_setspecific (arena_key, (void *) a);
759 mutex_init (&a->mutex);
760 (void) mutex_lock (&a->mutex);
762 (void) mutex_lock (&list_lock);
764 /* Add the new arena to the global list. */
765 a->next = main_arena.next;
766 atomic_write_barrier ();
767 main_arena.next = a;
769 (void) mutex_unlock (&list_lock);
771 return a;
775 static mstate
776 get_free_list (void)
778 mstate result = free_list;
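  /* The unlocked read above is only a quick check; free_list is re-read and
     popped under list_lock below, so a stale value here is harmless.  */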
779 if (result != NULL)
781 (void) mutex_lock (&list_lock);
782 result = free_list;
783 if (result != NULL)
784 free_list = result->next_free;
785 (void) mutex_unlock (&list_lock);
787 if (result != NULL)
789 LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
790 (void) mutex_lock (&result->mutex);
791 tsd_setspecific (arena_key, (void *) result);
795 return result;
798 /* Lock and return an arena that can be reused for memory allocation.
799 Avoid AVOID_ARENA as we have already failed to allocate memory in
800 it and it is currently locked. */
801 static mstate
802 reused_arena (mstate avoid_arena)
804 mstate result;
805 static mstate next_to_use;
806 if (next_to_use == NULL)
807 next_to_use = &main_arena;
 809   result = next_to_use;
 810   do
 811     {
812 if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
813 goto out;
 815       result = result->next;
 816     }
 817   while (result != next_to_use);
819 /* Avoid AVOID_ARENA as we have already failed to allocate memory
820 in that arena and it is currently locked. */
821 if (result == avoid_arena)
822 result = result->next;
824 /* Make sure that the arena we get is not corrupted. */
825 mstate begin = result;
826 while (arena_is_corrupt (result) || result == avoid_arena)
828 result = result->next;
829 if (result == begin)
830 break;
 833   /* We could not find any arena that was neither corrupted nor the one
 834      we wanted to avoid.  */
835 if (result == begin || result == avoid_arena)
836 return NULL;
838 /* No arena available without contention. Wait for the next in line. */
839 LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
840 (void) mutex_lock (&result->mutex);
842 out:
843 LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
844 tsd_setspecific (arena_key, (void *) result);
845 next_to_use = result->next;
847 return result;
850 static mstate
851 internal_function
852 arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
854 mstate a;
856 static size_t narenas_limit;
858 a = get_free_list ();
859 if (a == NULL)
861 /* Nothing immediately available, so generate a new arena. */
862 if (narenas_limit == 0)
864 if (mp_.arena_max != 0)
865 narenas_limit = mp_.arena_max;
866 else if (narenas > mp_.arena_test)
868 int n = __get_nprocs ();
870 if (n >= 1)
871 narenas_limit = NARENAS_FROM_NCORES (n);
872 else
873 /* We have no information about the system. Assume two
874 cores. */
875 narenas_limit = NARENAS_FROM_NCORES (2);
878 repeat:;
879 size_t n = narenas;
880 /* NB: the following depends on the fact that (size_t)0 - 1 is a
881 very large number and that the underflow is OK. If arena_max
882 is set the value of arena_test is irrelevant. If arena_test
883 is set but narenas is not yet larger or equal to arena_test
884 narenas_limit is 0. There is no possibility for narenas to
885 be too big for the test to always fail since there is not
886 enough address space to create that many arenas. */
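      /* For example: while arena_max is unset and narenas has not yet
         exceeded arena_test, narenas_limit stays 0, so narenas_limit - 1
         wraps to SIZE_MAX and the test below always passes (a new arena is
         created).  Once narenas_limit is, say, 8, the test fails as soon as
         narenas reaches 8 and an existing arena is reused instead.  */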
887 if (__glibc_unlikely (n <= narenas_limit - 1))
889 if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
890 goto repeat;
891 a = _int_new_arena (size);
892 if (__glibc_unlikely (a == NULL))
893 catomic_decrement (&narenas);
895 else
896 a = reused_arena (avoid_arena);
898 return a;
901 /* If we don't have the main arena, then maybe the failure is due to running
902 out of mmapped areas, so we can try allocating on the main arena.
903 Otherwise, it is likely that sbrk() has failed and there is still a chance
904 to mmap(), so try one of the other arenas. */
905 static mstate
906 arena_get_retry (mstate ar_ptr, size_t bytes)
908 LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
909 if (ar_ptr != &main_arena)
911 (void) mutex_unlock (&ar_ptr->mutex);
912 ar_ptr = &main_arena;
913 (void) mutex_lock (&ar_ptr->mutex);
915 else
917 /* Grab ar_ptr->next prior to releasing its lock. */
918 mstate prev = ar_ptr->next ? ar_ptr : 0;
919 (void) mutex_unlock (&ar_ptr->mutex);
920 ar_ptr = arena_get2 (prev, bytes, ar_ptr);
923 return ar_ptr;
926 static void __attribute__ ((section ("__libc_thread_freeres_fn")))
927 arena_thread_freeres (void)
929 void *vptr = NULL;
930 mstate a = tsd_getspecific (arena_key, vptr);
931 tsd_setspecific (arena_key, NULL);
933 if (a != NULL)
935 (void) mutex_lock (&list_lock);
936 a->next_free = free_list;
937 free_list = a;
938 (void) mutex_unlock (&list_lock);
941 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
 943 /*
 944  * Local variables:
 945  * c-basic-offset: 2
 946  * End:
 947  */