/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
# define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
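
/* Worked example (illustrative; the address below is made up): because
   every non-main heap starts at an address that is a multiple of
   HEAP_MAX_SIZE, the heap owning any chunk can be found by masking off
   the low bits of the chunk address, which is what heap_for_ptr() does
   further down.  Assuming HEAP_MAX_SIZE == 1024*1024 (0x100000), a
   chunk at 0x7f12345678 belongs to the heap starting at

     0x7f12345678 & ~(0x100000 - 1) == 0x7f12300000

   This only works because HEAP_MAX_SIZE is a power of two.  */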

#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */

typedef struct _heap_info {
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
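
/* Worked example of the padding arithmetic above (illustrative figures;
   actual values depend on the target, and it is assumed that pointers
   and INTERNAL_SIZE_T have the same size SIZE_SZ): the four fixed
   members occupy 4*SIZE_SZ bytes, so the pad must bring the total to
   -2*SIZE_SZ modulo MALLOC_ALIGNMENT, i.e. -6*SIZE_SZ modulo the
   alignment, which is exactly -6 * SIZE_SZ & MALLOC_ALIGN_MASK.
   With SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16: fixed part 32 bytes,
   pad = -48 & 15 == 0, and (32 + 2*8) % 16 == 0.  With SIZE_SZ == 4
   but MALLOC_ALIGNMENT == 16: fixed part 16 bytes, pad = -24 & 15 == 8,
   and (24 + 2*4) % 16 == 0 as required.  */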

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;
#ifdef PER_THREAD
static size_t narenas;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
#define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#else
#define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#endif
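
/* Sketch of the intended calling pattern (simplified; the real callers
   are the public allocation entry points in malloc.c, e.g.
   public_mALLOc):

     mstate ar_ptr;
     arena_get(ar_ptr, sz);            // look up and lock an arena
     if(ar_ptr) {
       Void_t *victim = _int_malloc(ar_ptr, sz);
       (void)mutex_unlock(&ar_ptr->mutex);
     }

   arena_get() leaves the chosen arena locked; the caller must unlock
   its mutex once the allocation has been carved out.  */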

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)

#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           __const __malloc_ptr_t);
# if !defined _LIBC || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
                                             __const __malloc_ptr_t);
# endif
static void (*save_free_hook) (__malloc_ptr_t __ptr,
                               __const __malloc_ptr_t);
static Void_t* save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);    /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))              /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

#ifdef ATOMIC_FASTBINS
  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
#else
  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, p);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
#endif
}

/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      Void_t *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */
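
/* Sketch of how the handlers above cooperate around fork() (simplified;
   they are registered in ptmalloc_init() below via
   thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all,
   ptmalloc_unlock_all2)):

     thread calls fork()
       ptmalloc_lock_all()            <- prepare: lock list_lock and every
                                         arena, switch to the atfork hooks
       ... kernel creates the child ...
       parent: ptmalloc_unlock_all()  <- unlock everything, restore hooks
       child:  ptmalloc_unlock_all2() <- re-initialize the mutexes instead
                                         of unlocking them

   Any other thread that calls malloc()/free() in the meantime ends up
   in malloc_atfork()/free_atfork() and blocks on list_lock until the
   handlers have finished.  */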

/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif /* _LIBC */
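
/* Example (illustrative): if the environment contains the entry
   "MALLOC_TOP_PAD_=131072", next_env_entry() returns a pointer to
   "TOP_PAD_=131072", i.e. the text following the "MALLOC_" prefix.
   ptmalloc_init() below then dispatches on the length of the name in
   front of the '=' sign.  */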

/* Set up basic state so that _int_malloc et al can work.  */
static void
ptmalloc_init_minimal (void)
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize = malloc_getpagesize;
#ifdef PER_THREAD
# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
  mp_.arena_test = NARENAS_FROM_NCORES (1);
  narenas = 1;
#endif
}
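
/* Worked example for NARENAS_FROM_NCORES (illustrative): on an LP64
   system sizeof(long) == 8, so the macro yields 8 arenas per core and
   mp_.arena_test starts out as 8; on a 32-bit system the factor is 2.
   reused_arena() below creates arenas freely until more than
   arena_test of them exist, and only then computes and enforces the
   core-count based limit, e.g. NARENAS_FROM_NCORES(4) == 32 on a
   4-core LP64 machine, unless MALLOC_ARENA_MAX overrides it.  */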

#ifdef _LIBC
# ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
# endif

# if defined SHARED && !USE___THREAD
/* This is called by __pthread_initialize_minimal when it needs to use
   malloc to set up the TLS state.  We cannot do the full work of
   ptmalloc_init (below) until __pthread_initialize_minimal has finished,
   so it has to switch to using the special startup-time hooks while doing
   those allocations.  */
void
__libc_malloc_pthread_startup (bool first_time)
{
  if (first_time)
    {
      ptmalloc_init_minimal ();
      save_malloc_hook = __malloc_hook;
      save_memalign_hook = __memalign_hook;
      save_free_hook = __free_hook;
      __malloc_hook = malloc_starter;
      __memalign_hook = memalign_starter;
      __free_hook = free_starter;
    }
  else
    {
      __malloc_hook = save_malloc_hook;
      __memalign_hook = save_memalign_hook;
      __free_hook = save_free_hook;
    }
}
# endif
#endif

static void
ptmalloc_init (void)
{
#if __STD_C
  const char* s;
#else
  char* s;
#endif
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef _LIBC
# if defined SHARED && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
#endif
    ptmalloc_init_minimal();

#ifndef NO_THREADS
# if defined _LIBC
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
# define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
# ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
# endif /* !defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# ifndef NO_STARTER
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# else
# undef NO_STARTER
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure)
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    mALLOPt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! secure)
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! secure)
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
        mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s && s[0]) {
    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
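
/* Usage example (illustrative): the tunables parsed above can be set
   from the shell, e.g.

     $ MALLOC_CHECK_=3 MALLOC_TRIM_THRESHOLD_=131072 ./myprog

   In the code above MALLOC_CHECK_ is read regardless of the secure
   flag, while the other MALLOC_* variables are skipped when
   __libc_enable_secure is set.  */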

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif

/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
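
/* Worked example of the alignment trick used in new_heap() below
   (addresses are made up; assume HEAP_MAX_SIZE == 0x100000):

     p1 = mmap(0, 2*HEAP_MAX_SIZE, ...)           ->  0x40055000
     p2 = align p1 up to a HEAP_MAX_SIZE multiple ->  0x40100000
     munmap(p1, p2 - p1)            releases the leading  0xab000 bytes
     munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - (p2 - p1))
                                    releases the trailing 0x55000 bytes

   which leaves exactly [0x40100000, 0x40200000) mapped, i.e. one
   HEAP_MAX_SIZE-aligned heap.  If p1 happens to be aligned already
   (p2 == p1), the trailing block is unmapped as well but its address is
   remembered in aligned_heap_area so the next call can try MMAP at
   that hint first.  */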

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (mprotect((char *)h + h->mprotect_size,
                 (unsigned long) new_size - h->mprotect_size,
                 PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}

/* Shrink a heap. */

static int
#if __STD_C
shrink_heap(heap_info *h, long diff)
#else
shrink_heap(h, diff) heap_info *h; long diff;
#endif
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
#ifdef _LIBC
  if (__builtin_expect (__libc_enable_secure, 0))
#else
  if (1)
#endif
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
#ifdef _LIBC
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
#endif
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
      aligned_heap_area = NULL; \
    munmap((char*)(heap), HEAP_MAX_SIZE); \
  } while (0)

static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
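
/* Worked example for the "extra" computation above (illustrative):
   with pad == 0 and top_size == 10*pagesz + MINSIZE we get

     extra = ((10*pagesz + pagesz - 1) / pagesz - 1) * pagesz
           = (10 - 1) * pagesz
           = 9 * pagesz

   so nine whole pages are given back and one page plus MINSIZE stays
   in the top chunk.  The early return above ensures the heap is only
   shrunk when at least one full page can be released.  */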

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  ++narenas;

  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
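
/* Layout of a freshly created non-main arena (sketch):

   heap start (HEAP_MAX_SIZE-aligned)
   |
   v
   +-----------+---------------------+-------------------------------+
   | heap_info | struct malloc_state | top chunk ...                 |
   +-----------+---------------------+-------------------------------+
               ^                     ^
               a = (mstate)(h + 1)   top(a), bumped up if necessary so
                                     that its user pointer is
                                     MALLOC_ALIGNMENT-aligned

   The writable region is h->size bytes; everything beyond that up to
   HEAP_MAX_SIZE is still mapped PROT_NONE and only becomes usable
   after grow_heap() extends the heap.  */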

#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (Void_t *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}

static mstate
reused_arena (void)
{
  if (narenas <= mp_.arena_test)
    return NULL;

  static int narenas_limit;
  if (narenas_limit == 0)
    {
      if (mp_.arena_max != 0)
        narenas_limit = mp_.arena_max;
      else
        {
          int n = __get_nprocs ();

          if (n >= 1)
            narenas_limit = NARENAS_FROM_NCORES (n);
          else
            /* We have no information about the system.  Assume two
               cores.  */
            narenas_limit = NARENAS_FROM_NCORES (2);
        }
    }

  if (narenas < narenas_limit)
    return NULL;

  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (Void_t *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif

static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;

#ifdef PER_THREAD
  if ((a = get_free_list ()) == NULL
      && (a = reused_arena ()) == NULL)
    /* Nothing immediately available, so generate a new arena.  */
    a = _int_new_arena(size);
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}

#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  Void_t *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif

#endif /* USE_ARENAS */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */