glibc.git / malloc / arena.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
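
/* For example (illustrative values only): if HEAP_MAX_SIZE were 64 MiB
   (0x4000000), a chunk at address 0x7f3254321560 would belong to the
   heap starting at 0x7f3254321560 & ~0x3ffffff == 0x7f3254000000,
   which is exactly the computation heap_for_ptr performs below.  */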

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                             ? -1 : 1];
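
/* A worked example of the padding above (illustrative values): with
   SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16, MALLOC_ALIGN_MASK is 15, so
   -6 * SIZE_SZ & MALLOC_ALIGN_MASK == -48 & 15 == 0 and pad is empty;
   sizeof (heap_info) is then 32 and 32 + 2 * SIZE_SZ == 48, a multiple
   of MALLOC_ALIGNMENT, which is exactly what the check above verifies.  */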

/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  */

static mutex_t list_lock = MUTEX_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;

/* Mapped memory in non-main arenas (reliable only for NO_THREADS).  */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do {             \
      ptr = thread_arena;                     \
      arena_lock (ptr, size);                 \
  } while (0)

#define arena_lock(ptr, size) do {            \
      if (ptr && !arena_is_corrupt (ptr))     \
        (void) mutex_lock (&ptr->mutex);      \
      else                                    \
        ptr = arena_get2 ((size), NULL);      \
  } while (0)
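
/* A typical call sequence, sketched after the callers in malloc.c
   (paraphrased, not copied verbatim):

     mstate ar_ptr;
     void *victim;

     arena_get (ar_ptr, bytes);               // lock this thread's arena
     victim = _int_malloc (ar_ptr, bytes);
     if (ar_ptr != NULL)
       (void) mutex_unlock (&ar_ptr->mutex);

   arena_get leaves ptr locked (or NULL on failure), so the caller is
   responsible for the matching unlock.  */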

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
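
/* Sketch of how the two macros combine (illustrative): for a chunk p
   carrying the NON_MAIN_ARENA flag, heap_for_ptr (p) masks p down to its
   HEAP_MAX_SIZE-aligned heap_info, whose ar_ptr field names the owning
   arena; chunks without the flag always belong to main_arena, which lives
   in libc's data segment rather than in an mmap'ed heap.  */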

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static void *(*save_malloc_hook)(size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;

# ifdef ATFORK_MEM
ATFORK_MEM;
# endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

# define ATFORK_ARENA_PTR ((void *) -1)

/* The following hooks are used while the `atfork' handling mechanism
   is active.  */

static void *
malloc_atfork (size_t sz, const void *caller)
{
  void *victim;

  if (thread_arena == ATFORK_ARENA_PTR)
    {
      /* We are the only thread that may allocate at all.  */
      if (save_malloc_hook != malloc_check)
        {
          return _int_malloc (&main_arena, sz);
        }
      else
        {
          if (top_check () < 0)
            return 0;

          victim = _int_malloc (&main_arena, sz + 1);
          return mem2mem_check (victim, sz);
        }
    }
  else
    {
      /* Suspend the thread until the `atfork' handlers have completed.
         By that time, the hooks will have been reset as well, so that
         mALLOc() can be used again.  */
      (void) mutex_lock (&list_lock);
      (void) mutex_unlock (&list_lock);
      return __libc_malloc (sz);
    }
}

static void
free_atfork (void *mem, const void *caller)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk (mem);    /* do not bother to replicate free_check here */

  if (chunk_is_mmapped (p))             /* release mmapped memory. */
    {
      munmap_chunk (p);
      return;
    }

  ar_ptr = arena_for_chunk (p);
  _int_free (ar_ptr, p, thread_arena == ATFORK_ARENA_PTR);
}

/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads).  */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  if (mutex_trylock (&list_lock))
    {
      if (thread_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void) mutex_lock (&list_lock);
    }
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now.  */
  save_arena = thread_arena;
  thread_arena = ATFORK_ARENA_PTR;
out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  if (--atfork_recursive_cntr != 0)
    return;

  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  (void) mutex_unlock (&list_lock);
}

# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child.  */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  free_list = NULL;
  for (ar_ptr = &main_arena;; )
    {
      mutex_init (&ar_ptr->mutex);
      if (ar_ptr != save_arena)
        {
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  mutex_init (&list_lock);
  atfork_recursive_cntr = 0;
}

# else

#  define ptmalloc_unlock_all2 ptmalloc_unlock_all
# endif
#endif  /* !NO_THREADS */

/* Initialization routine.  */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
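
/* For example (hypothetical environment): given the entries "PATH=/bin",
   "MALLOC_TOP_PAD_=65536" and "HOME=/root", successive calls return
   "TOP_PAD_=65536" and then NULL, with *position left just past the
   matched entry so the scan resumes there on the next call.  */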

#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;
  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism.  */
#ifdef thread_atfork_static
thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all,         \
                      ptmalloc_unlock_all2)
#endif

/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr.  */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
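
/* A sketch of that scheme with illustrative numbers: an mmap of
   2 * HEAP_MAX_SIZE might return p1 == 0x7f3252300000; for a 64 MiB
   HEAP_MAX_SIZE, rounding up gives p2 == 0x7f3254000000.  new_heap then
   unmaps the leading p2 - p1 bytes and the trailing
   HEAP_MAX_SIZE - (p2 - p1) bytes, and if p1 happened to be aligned
   already it remembers the second aligned half in aligned_heap_area
   for the next call.  */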

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size.  */

static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway).  */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned.  */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size.  */
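
/* For example (illustrative numbers), with a 4096-byte page size a
   request to grow by 5000 bytes is rounded up to 8192 bytes (two pages)
   before the mapping is extended.  */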

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap.  */

#define delete_heap(heap) \
  do {                                                          \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL;                               \
      __munmap ((char *) (heap), HEAP_MAX_SIZE);                \
    } while (0)

static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      arena_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
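
  /* Worked example with illustrative values: with a 4 KiB page size,
     MINSIZE == 32, pad == 128 KiB and a 256 KiB top chunk, top_area is
     262111, top_area - pad is 131039, and ALIGN_DOWN yields extra ==
     126976 (31 pages), so roughly 124 KiB would be handed back by
     shrink_heap below.  */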
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink.  */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success.  Adjust top accordingly.  */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment.  */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  thread_arena = a;
  mutex_init (&a->mutex);
  (void) mutex_lock (&a->mutex);

  (void) mutex_lock (&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  (void) mutex_unlock (&list_lock);

  return a;
}

static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void) mutex_lock (&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void) mutex_unlock (&list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void) mutex_lock (&result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        break;
    }

  /* We could not find any arena that was either not corrupted or not the one
     we wanted to avoid.  */
  if (result == begin || result == avoid_arena)
    return NULL;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set, the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger than or equal to arena_test,
         narenas_limit is 0.  There is no possibility for narenas to be
         too big for the test to always fail, since there is not enough
         address space to create that many arenas.  */
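
      /* Concretely (illustrative): while narenas_limit is still 0,
         narenas_limit - 1 wraps around to SIZE_MAX, so the test below
         always succeeds and new arenas keep being created; once a real
         limit has been computed above, arena creation stops at that
         limit and reused_arena takes over.  */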
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      (void) mutex_lock (&ar_ptr->mutex);
    }
  else
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      (void) mutex_lock (&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void) mutex_unlock (&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */