/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps. */
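
/* For example, with the default HEAP_MAX_SIZE of 1 MB (0x100000), a chunk
   at address 0x4feab0 inside a heap based at 0x400000 is mapped back to
   that heap simply by clearing the low 20 bits of its address; this is
   what the heap_for_ptr() macro below does. */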

#define THREAD_STATS 0

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */

typedef struct _heap_info {
  mstate ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly aligned. */
} heap_info;

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;
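/* -1 means malloc is not yet initialized, 0 means initialization is in
   progress (set at the top of ptmalloc_init below), and 1 means it has
   completed. */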

/**************************************************************************/

#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
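
/* Usage sketch (illustrative only; the real callers are the public
   wrappers in malloc.c):

     arena_get(ar_ptr, bytes);
     if(!ar_ptr)
       return 0;
     victim = _int_malloc(ar_ptr, bytes);
     (void)mutex_unlock(&ar_ptr->mutex);
*/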

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
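
/* Chunks in the main arena come from sbrk()ed memory and do not sit in an
   mmap()ed, HEAP_MAX_SIZE-aligned heap, so the address mask cannot be
   applied to them; arena_for_chunk() therefore checks the chunk's
   NON_MAIN_ARENA flag (via chunk_non_main_arena()) before using
   heap_for_ptr(). */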

#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           __const __malloc_ptr_t);
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
                                             __const __malloc_ptr_t);
# endif
static void           (*save_free_hook) (__malloc_ptr_t __ptr,
                                         __const __malloc_ptr_t);
static Void_t*        save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check() < 0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                        /* chunk corresponding to mem */

  if (mem == 0)                       /* free(0) has no effect */
    return;

  p = mem2chunk(mem);   /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p)) {          /* release mmapped memory. */
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}

/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      Void_t *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child.  */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */

/* Initialization routine. */

extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}

/* Set up basic state so that _int_malloc et al can work.  */

static void
ptmalloc_init_minimal (void)
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad        = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize       = malloc_getpagesize;
}

static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);

# if defined SHARED && defined USE_TLS && !USE___THREAD
/* This is called by __pthread_initialize_minimal when it needs to use
   malloc to set up the TLS state.  We cannot do the full work of
   ptmalloc_init (below) until __pthread_initialize_minimal has finished,
   so it has to switch to using the special startup-time hooks while doing
   those allocations.  */
void
__libc_malloc_pthread_startup (bool first_time)
{
  if (first_time)
    {
      ptmalloc_init_minimal ();
      save_malloc_hook = __malloc_hook;
      save_memalign_hook = __memalign_hook;
      save_free_hook = __free_hook;
      __malloc_hook = malloc_starter;
      __memalign_hook = memalign_starter;
      __free_hook = free_starter;
    }
  else
    {
      __malloc_hook = save_malloc_hook;
      __memalign_hook = save_memalign_hook;
      __free_hook = save_free_hook;
    }
}
# endif

static void
ptmalloc_init (void)
{
  const char* s;
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

# if defined SHARED && defined USE_TLS && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
    ptmalloc_init_minimal();

#ifndef NO_THREADS
# if defined _LIBC && defined USE_TLS
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
#  define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);

  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;

#ifdef _LIBC
  secure = __libc_enable_secure;
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          if (len == 6 && memcmp (envline, "CHECK_", 6) == 0)
            s = &envline[7];
          else if (! secure)
            {
              if (len == 8 && memcmp (envline, "TOP_PAD_", 8) == 0)
                mALLOPt(M_TOP_PAD, atoi(&envline[9]));
              else if (len == 8 && memcmp (envline, "PERTURB_", 8) == 0)
                mALLOPt(M_PERTURB, atoi(&envline[9]));
              else if (len == 9 && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              else if (len == 15 && memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
              else if (len == 15 && memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
        mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif

  if(s) {
    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif

/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
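
/* new_heap() below obtains an aligned region by mapping 2*HEAP_MAX_SIZE
   bytes and unmapping the slack around the HEAP_MAX_SIZE-aligned part;
   when the original mapping happens to be aligned already, the address of
   the (unmapped) second half is cached here and tried first on the next
   call. */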

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}

/* Grow or shrink a heap.  size is automatically rounded up to a
   multiple of the page size if it is positive. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  if(diff >= 0) {
    diff = (diff + page_mask) & ~page_mask;
    new_size = (long)h->size + diff;
    if(new_size > HEAP_MAX_SIZE)
      return -1;
    if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
      return -2;
  } else {
    new_size = (long)h->size + diff;
    if(new_size < (long)sizeof(*h))
      return -1;
    /* Try to re-map the extra heap space freshly to save memory, and
       make it inaccessible. */
    if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
                    MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
      return -2;
    /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
  }
  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
      aligned_heap_area = NULL; \
    munmap((char*)(heap), HEAP_MAX_SIZE); \
  } while (0)

static int
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
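  /* extra is the number of whole pages that can be shaved off the end of
     the top chunk while still leaving at least pad + MINSIZE bytes in it. */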
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(grow_heap(heap, -extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size". */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}

static mstate
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  THREAD_STAT(++(a->stat_lock_loop));

  (void)mutex_unlock(&list_lock);

  return a;
}

#endif /* USE_ARENAS */