/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009,2010,2011,2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info {
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
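
/* In the usual configuration, where pointers and size_t have the same
   size, the four members above occupy 4 * SIZE_SZ bytes.  Choosing
   pad = -6 * SIZE_SZ & MALLOC_ALIGN_MASK then makes
   sizeof (heap_info) + 2 * SIZE_SZ a multiple of MALLOC_ALIGNMENT,
   which is exactly what the compile-time check below verifies.  */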
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
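
/* (If the expression is non-zero the declared array size is -1, which
   the compiler rejects; otherwise the declaration is harmless.)  */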
/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
static size_t narenas = 1;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)
#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#endif
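
/* In the PER_THREAD build a thread keeps using the arena recorded in
   its thread-specific data and simply blocks on that arena's mutex.
   In the classic build a trylock is attempted first; on contention
   arena_get2() searches the arena list for an unlocked arena or
   creates a new one.  */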
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
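
/* heap_for_ptr() relies on two invariants established by new_heap():
   each non-main heap starts at an address that is a multiple of
   HEAP_MAX_SIZE and is at most HEAP_MAX_SIZE bytes long, so clearing
   the low-order bits of any chunk address yields the heap_info header
   at the start of its heap.  Chunks whose NON_MAIN_ARENA bit is clear
   belong to the main arena, which is not backed by an aligned heap.  */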
/**************************************************************************/

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           const __malloc_ptr_t);
static void           (*save_free_hook) (__malloc_ptr_t __ptr,
                                         const __malloc_ptr_t);
static void*        save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */
static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0]) {
    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
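
/* new_heap() below obtains HEAP_MAX_SIZE-aligned address space as
   follows: it first retries the cached aligned_heap_area hint, then
   maps 2 * HEAP_MAX_SIZE bytes of PROT_NONE memory, rounds the start
   up to the next HEAP_MAX_SIZE boundary and unmaps the unused head and
   tail, and as a last resort maps a single HEAP_MAX_SIZE region hoping
   it happens to be aligned.  Only the portion of the heap actually in
   use is mprotect()ed to PROT_READ|PROT_WRITE.  */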
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
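
/* The distinction between h->size and h->mprotect_size matters for the
   grow/shrink functions below: size is the amount the arena currently
   treats as part of the heap, while mprotect_size is how much of the
   reserved HEAP_MAX_SIZE region has ever been made
   PROT_READ|PROT_WRITE.  grow_heap() calls mprotect() only when it has
   to move past that high-water mark, and shrink_heap() lowers
   mprotect_size only in the secure case, where the pages are really
   remapped and become inaccessible.  */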
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}
/* Shrink a heap. */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
  if (__builtin_expect (__libc_enable_secure, 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}
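
/* Note the asymmetry above: in the __libc_enable_secure case the freed
   tail is replaced by a fresh PROT_NONE mapping, so it becomes
   inaccessible and mprotect_size is lowered accordingly; in the normal
   case MADV_DONTNEED only lets the kernel discard the page contents
   while the pages remain mapped read-write, so mprotect_size is left
   unchanged.  */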
/* Delete a heap. */

#define delete_heap(heap) \
  do {								\
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
      aligned_heap_area = NULL;					\
    __munmap((char*)(heap), HEAP_MAX_SIZE);			\
  } while (0)
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;
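  /* The write barrier above makes sure that a thread walking the arena
     list without holding list_lock (as arena_get2() and reused_arena()
     may do) never observes the new arena before its next pointer and
     the rest of its state are visible.  */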

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
static mstate
reused_arena (void)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif
static mstate
internal_function
arena_get2(mstate a_tsd, size_t size)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n  = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena ();
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */