/* glibc: malloc/arena.c  */
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info {
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
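/* Worked example (illustration only, assuming a hypothetical configuration
   with SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16): the four members above
   occupy 4 * SIZE_SZ == 16 bytes, so sizeof (heap_info) + 2 * SIZE_SZ would
   be 24 without padding.  The pad expression supplies the missing bytes:

     -6 * SIZE_SZ & MALLOC_ALIGN_MASK  ==  -24 & 15  ==  8

   giving sizeof (heap_info) == 24 and 24 + 2 * SIZE_SZ == 32, a multiple of
   MALLOC_ALIGNMENT.  On a typical 64-bit build (SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16) the expression evaluates to 0 and no padding is
   added.  */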
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
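/* Rough usage sketch (illustration only, not a verbatim quote of malloc.c):
   a caller typically obtains and locks the thread's arena, allocates from
   it, then drops the lock:

     arena_get (ar_ptr, bytes);
     if (ar_ptr != NULL)
       {
         victim = _int_malloc (ar_ptr, bytes);
         (void) mutex_unlock (&ar_ptr->mutex);
       }
*/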
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
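/* Worked example (illustration only): because every non-main heap starts at
   an address aligned to HEAP_MAX_SIZE, the owning heap_info is recovered by
   clearing the low bits of any chunk address.  With HEAP_MAX_SIZE ==
   1024*1024 (mask ~0xfffff), a chunk at, say, 0x7f3a5c2e4d10 maps to the
   heap header at 0x7f3a5c200000, and heap_for_ptr(p)->ar_ptr then yields the
   arena without any search or locking.  */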
/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static void *(*save_malloc_hook) (size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */
static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
    {
      munmap_chunk(p);
      return;
    }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  free_list = NULL;
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

# else

# define ptmalloc_unlock_all2 ptmalloc_unlock_all

# endif

#endif  /* !NO_THREADS */
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
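/* Worked example (illustration only): given an environment such as
   { "PATH=/bin", "MALLOC_ARENA_MAX=2", NULL }, the first call skips
   "PATH=/bin", matches the "MALLOC_" prefix of the second entry, advances
   *position past it and returns a pointer to "ARENA_MAX=2".  A further
   call finds no more matches and returns NULL.  */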
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
                }
              break;
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0]) {
    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
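/* Usage note (illustration only): the switch above recognizes, for
   non-secure processes, the environment tunables MALLOC_TOP_PAD_,
   MALLOC_PERTURB_, MALLOC_MMAP_MAX_, MALLOC_ARENA_MAX, MALLOC_ARENA_TEST,
   MALLOC_TRIM_THRESHOLD_ and MALLOC_MMAP_THRESHOLD_, plus MALLOC_CHECK_.
   A typical invocation might be

     MALLOC_ARENA_MAX=2 MALLOC_CHECK_=3 ./some_program

   which caps the number of arenas at two and enables the malloc checking
   hooks via __malloc_check_init().  */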
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
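/* Worked example (illustration only): the fallback path above maps twice
   HEAP_MAX_SIZE and trims both ends to obtain an aligned region.  With
   HEAP_MAX_SIZE == 0x100000, if the kernel returns p1 == 0x7f0000234000,
   then

     p2 = (p1 + 0xfffff) & ~0xfffff  =  0x7f0000300000
     ul = p2 - p1                    =  0xcc000   (unmapped at the front)
     HEAP_MAX_SIZE - ul              =  0x34000   (unmapped past the tail)

   leaving exactly [p2, p2 + HEAP_MAX_SIZE) mapped and aligned.  When ul is
   zero the aligned second half is unmapped as well, but its address is
   remembered in aligned_heap_area as a hint for the next call.  */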
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Shrink a heap. */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__builtin_expect (check_may_shrink_heap (), 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                          \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)    \
      aligned_heap_area = NULL;                                 \
    __munmap((char*)(heap), HEAP_MAX_SIZE);                     \
  } while (0)
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
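/* Worked example (illustration only, assuming a 4096-byte page, MINSIZE == 32
   on a 64-bit build, pad == 0x20000, and a top chunk of 0x42000 bytes): the
   shrink computation above yields

     extra = (0x42000 - 0x20000 - 32 - 1) & ~0xfff  =  0x21000

   so heap_trim asks shrink_heap to give 0x21000 bytes back to the system and
   keeps the remainder as the arena's top chunk.  */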
/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

  (void)mutex_lock(&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  (void)mutex_unlock(&list_lock);

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
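/* Layout sketch (illustration only): after _int_new_arena the start of the
   freshly mmap'ed heap looks roughly like

     +-----------+--------------------------+---------+-------------------+
     | heap_info | malloc_state (arena `a') | (align) | top chunk ...     |
     +-----------+--------------------------+---------+-------------------+
     ^ aligned to HEAP_MAX_SIZE                        ^ top(a) == ptr

   i.e. the arena descriptor lives inside its own first heap, and everything
   from the aligned `ptr' to the end of the heap becomes the initial top
   chunk.  */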
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void)mutex_lock(&result->mutex);

 out:
  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
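/* Worked example (illustration only): the underflow relied upon above means
   that while narenas_limit is still 0, the test

     n <= narenas_limit - 1     i.e.  n <= (size_t) 0 - 1  ==  SIZE_MAX

   is always true, so new arenas keep being created.  Once narenas exceeds
   arena_test (or arena_max is set), narenas_limit holds a real bound and the
   same test starts steering threads into reused_arena() instead.  */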
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if(ar_ptr != &main_arena) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = &main_arena;
    (void)mutex_lock(&ar_ptr->mutex);
  } else {
    /* Grab ar_ptr->next prior to releasing its lock.  */
    mstate prev = ar_ptr->next ? ar_ptr : 0;
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = arena_get2(prev, bytes, ar_ptr);
  }

  return ar_ptr;
}
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */