malloc: Remove check_action variable [BZ #21754]
[glibc.git] / malloc / arena.c
blob 39cbfbc2827cc26ae6f7ac2487999cb7e43ffb61
1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 2001-2017 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If
18 not, see <http://www.gnu.org/licenses/>. */
20 #include <stdbool.h>
22 #if HAVE_TUNABLES
23 # define TUNABLE_NAMESPACE malloc
24 #endif
25 #include <elf/dl-tunables.h>
27 /* Compile-time constants. */
29 #define HEAP_MIN_SIZE (32 * 1024)
30 #ifndef HEAP_MAX_SIZE
31 # ifdef DEFAULT_MMAP_THRESHOLD_MAX
32 # define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
33 # else
34 # define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
35 # endif
36 #endif
38 /* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
39 that are dynamically created for multi-threaded programs. The
40 maximum size must be a power of two, for fast determination of
41 which heap belongs to a chunk. It should be much larger than the
42 mmap threshold, so that requests with a size just below that
43 threshold can be fulfilled without creating too many heaps. */
45 /***************************************************************************/
47 #define top(ar_ptr) ((ar_ptr)->top)
49 /* A heap is a single contiguous memory region holding (coalesceable)
50 malloc_chunks. It is allocated with mmap() and always starts at an
51 address aligned to HEAP_MAX_SIZE. */
53 typedef struct _heap_info
55 mstate ar_ptr; /* Arena for this heap. */
56 struct _heap_info *prev; /* Previous heap. */
57 size_t size; /* Current size in bytes. */
58 size_t mprotect_size; /* Size in bytes that has been mprotected
59 PROT_READ|PROT_WRITE. */
60 /* Make sure the following data is properly aligned, particularly
61 that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
62 MALLOC_ALIGNMENT. */
63 char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
64 } heap_info;
66 /* Get a compile-time error if the heap_info padding is not correct
67 to make alignment work as expected in sYSMALLOc. */
68 extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
69 + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
70 ? -1 : 1];
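/* A minimal standalone sketch of the padding arithmetic above, using a
   C11 _Static_assert instead of the negative-array-size trick.  The
   SIZE_SZ and MALLOC_ALIGNMENT values are hypothetical stand-ins (an
   LP64-like 8/16 and an ILP32-like 4/16), not taken from malloc.h:
   heap_info is four SIZE_SZ-sized members plus the pad, and the pad
   rounds 6 * SIZE_SZ (struct body plus a 2 * SIZE_SZ chunk header) up
   to a multiple of MALLOC_ALIGNMENT. */
#include <stddef.h>

#define EX_PAD(size_sz, align) ((0 - 6 * (size_t) (size_sz)) & ((align) - 1))

_Static_assert ((4 * 8 + EX_PAD (8, 16) + 2 * 8) % 16 == 0,
                "LP64-style layout stays MALLOC_ALIGNMENT-aligned");
_Static_assert ((4 * 4 + EX_PAD (4, 16) + 2 * 4) % 16 == 0,
                "ILP32-style layout stays MALLOC_ALIGNMENT-aligned");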
72 /* Thread specific data. */
74 static __thread mstate thread_arena attribute_tls_model_ie;
76 /* Arena free list. free_list_lock synchronizes access to the
77 free_list variable below, and the next_free and attached_threads
78 members of struct malloc_state objects. No other locks must be
79 acquired after free_list_lock has been acquired. */
81 __libc_lock_define_initialized (static, free_list_lock);
82 static size_t narenas = 1;
83 static mstate free_list;
85 /* list_lock prevents concurrent writes to the next member of struct
86 malloc_state objects.
88 Read access to the next member is supposed to synchronize with the
89 atomic_write_barrier and the write to the next member in
90 _int_new_arena. This suffers from data races; see the FIXME
91 comments in _int_new_arena and reused_arena.
93 list_lock also prevents concurrent forks. At the time list_lock is
94 acquired, no arena lock must have been acquired, but it is
95 permitted to acquire arena locks subsequently, while list_lock is
96 acquired. */
97 __libc_lock_define_initialized (static, list_lock);
99 /* Already initialized? */
100 int __malloc_initialized = -1;
102 /**************************************************************************/
105 /* arena_get() acquires an arena and locks the corresponding mutex.
106 First, try the one last locked successfully by this thread. (This
107 is the common case and handled with a macro for speed.) Then, loop
108 once over the circularly linked list of arenas. If no arena is
109 readily available, create a new one. In this latter case, `size'
110 is just a hint as to how much memory will be required immediately
111 in the new arena. */
113 #define arena_get(ptr, size) do { \
114 ptr = thread_arena; \
115 arena_lock (ptr, size); \
116 } while (0)
118 #define arena_lock(ptr, size) do { \
119 if (ptr && !arena_is_corrupt (ptr)) \
120 __libc_lock_lock (ptr->mutex); \
121 else \
122 ptr = arena_get2 ((size), NULL); \
123 } while (0)
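/* A standalone sketch (POSIX threads, hypothetical "ex_" names) of the
   fast path arena_get implements: reuse the arena this thread locked
   last, and only fall back to the slow path, with arena_get2 played
   here by a trivial stub, when no cached arena is available. */
#include <pthread.h>
#include <stddef.h>

struct ex_arena
{
  pthread_mutex_t mutex;
  /* ... allocator state would live here ... */
};

static __thread struct ex_arena *ex_thread_arena;
static struct ex_arena ex_main_arena = { PTHREAD_MUTEX_INITIALIZER };

/* Stand-in for arena_get2: simply hands out the main arena.  The real
   slow path scans the free list, reuses another arena or creates one. */
static struct ex_arena *
ex_arena_get2 (size_t size)
{
  (void) size;
  pthread_mutex_lock (&ex_main_arena.mutex);
  ex_thread_arena = &ex_main_arena;
  return &ex_main_arena;
}

static struct ex_arena *
ex_arena_get (size_t size)
{
  struct ex_arena *a = ex_thread_arena;
  if (a != NULL)
    {
      pthread_mutex_lock (&a->mutex);       /* common case: one lock */
      return a;
    }
  return ex_arena_get2 (size);              /* first use in this thread */
}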
125 /* find the heap and corresponding arena for a given ptr */
127 #define heap_for_ptr(ptr) \
128 ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
129 #define arena_for_chunk(ptr) \
130 (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
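/* A standalone sketch of why the HEAP_MAX_SIZE alignment matters: with
   every non-main heap mmap'ed at a HEAP_MAX_SIZE-aligned address, the
   owning heap_info of any chunk is one mask away, which is all
   heap_for_ptr above does.  The size below is a hypothetical power of
   two, not the real HEAP_MAX_SIZE. */
#include <stdint.h>

#define EX_HEAP_MAX_SIZE (1UL << 26)        /* 64 MiB, a power of two */

static void *
ex_heap_for_ptr (void *chunk)
{
  return (void *) ((uintptr_t) chunk & ~(EX_HEAP_MAX_SIZE - 1));
}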
133 /**************************************************************************/
135 /* atfork support. */
137 /* The following three functions are called around fork from a
138 multi-threaded process. We do not use the general fork handler
139 mechanism to make sure that our handlers are the last ones being
140 called, so that other fork handlers can use the malloc
141 subsystem. */
143 void
144 internal_function
145 __malloc_fork_lock_parent (void)
147 if (__malloc_initialized < 1)
148 return;
150 /* We do not acquire free_list_lock here because we completely
151 reconstruct free_list in __malloc_fork_unlock_child. */
153 __libc_lock_lock (list_lock);
155 for (mstate ar_ptr = &main_arena;; )
157 __libc_lock_lock (ar_ptr->mutex);
158 ar_ptr = ar_ptr->next;
159 if (ar_ptr == &main_arena)
160 break;
164 void
165 internal_function
166 __malloc_fork_unlock_parent (void)
168 if (__malloc_initialized < 1)
169 return;
171 for (mstate ar_ptr = &main_arena;; )
173 __libc_lock_unlock (ar_ptr->mutex);
174 ar_ptr = ar_ptr->next;
175 if (ar_ptr == &main_arena)
176 break;
178 __libc_lock_unlock (list_lock);
181 void
182 internal_function
183 __malloc_fork_unlock_child (void)
185 if (__malloc_initialized < 1)
186 return;
188 /* Push all arenas to the free list, except thread_arena, which is
189 attached to the current thread. */
190 __libc_lock_init (free_list_lock);
191 if (thread_arena != NULL)
192 thread_arena->attached_threads = 1;
193 free_list = NULL;
194 for (mstate ar_ptr = &main_arena;; )
196 __libc_lock_init (ar_ptr->mutex);
197 if (ar_ptr != thread_arena)
199 /* This arena is no longer attached to any thread. */
200 ar_ptr->attached_threads = 0;
201 ar_ptr->next_free = free_list;
202 free_list = ar_ptr;
204 ar_ptr = ar_ptr->next;
205 if (ar_ptr == &main_arena)
206 break;
209 __libc_lock_init (list_lock);
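/* A standalone sketch of the same lock-around-fork discipline expressed
   with the generic pthread_atfork mechanism (which, per the comment
   above, malloc itself deliberately avoids): the parent takes every
   lock before fork and releases them afterwards, while the child
   reinitializes locks that dead threads may still hold.  Two
   hypothetical locks stand in for list_lock and the per-arena
   mutexes. */
#include <pthread.h>

static pthread_mutex_t ex_list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ex_arena_mutex = PTHREAD_MUTEX_INITIALIZER;

static void
ex_fork_prepare (void)
{
  pthread_mutex_lock (&ex_list_lock);
  pthread_mutex_lock (&ex_arena_mutex);
}

static void
ex_fork_parent (void)
{
  pthread_mutex_unlock (&ex_arena_mutex);
  pthread_mutex_unlock (&ex_list_lock);
}

static void
ex_fork_child (void)
{
  /* In the child, other threads are gone but may have held the locks:
     reset rather than unlock, as __malloc_fork_unlock_child does. */
  pthread_mutex_init (&ex_arena_mutex, NULL);
  pthread_mutex_init (&ex_list_lock, NULL);
}

static int
ex_install_fork_handlers (void)
{
  return pthread_atfork (ex_fork_prepare, ex_fork_parent, ex_fork_child);
}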
212 #if HAVE_TUNABLES
213 static inline int do_set_mallopt_check (int32_t value);
214 void
215 TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
217 int32_t value = (int32_t) valp->numval;
218 if (value != 0)
219 __malloc_check_init ();
222 # define TUNABLE_CALLBACK_FNDECL(__name, __type) \
223 static inline int do_ ## __name (__type value); \
224 void \
225 TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
227 __type value = (__type) (valp)->numval; \
228 do_ ## __name (value); \
231 TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
232 TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
233 TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
234 TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
235 TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
236 TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
237 TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
238 #if USE_TCACHE
239 TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
240 TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
241 TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
242 #endif
243 #else
244 /* Initialization routine. */
245 #include <string.h>
246 extern char **_environ;
248 static char *
249 internal_function
250 next_env_entry (char ***position)
252 char **current = *position;
253 char *result = NULL;
255 while (*current != NULL)
257 if (__builtin_expect ((*current)[0] == 'M', 0)
258 && (*current)[1] == 'A'
259 && (*current)[2] == 'L'
260 && (*current)[3] == 'L'
261 && (*current)[4] == 'O'
262 && (*current)[5] == 'C'
263 && (*current)[6] == '_')
265 result = &(*current)[7];
267 /* Save current position for next visit. */
268 *position = ++current;
270 break;
273 ++current;
276 return result;
278 #endif
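/* A standalone sketch of the environment scan next_env_entry feeds:
   walk environ once and pick out every "MALLOC_"-prefixed entry.  The
   function name is hypothetical; environ is the usual POSIX symbol. */
#include <stdio.h>
#include <string.h>

extern char **environ;

static void
ex_scan_malloc_env (void)
{
  for (char **e = environ; *e != NULL; ++e)
    if (strncmp (*e, "MALLOC_", 7) == 0)
      /* The tail after the prefix, e.g. "TOP_PAD_=131072". */
      printf ("malloc option: %s\n", *e + 7);
}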
281 #ifdef SHARED
282 static void *
283 __failing_morecore (ptrdiff_t d)
285 return (void *) MORECORE_FAILURE;
288 extern struct dl_open_hook *_dl_open_hook;
289 libc_hidden_proto (_dl_open_hook);
290 #endif
292 static void
293 ptmalloc_init (void)
295 if (__malloc_initialized >= 0)
296 return;
298 __malloc_initialized = 0;
300 #ifdef SHARED
301 /* In case this libc copy is in a non-default namespace, never use brk.
302 Likewise if dlopened from statically linked program. */
303 Dl_info di;
304 struct link_map *l;
306 if (_dl_open_hook != NULL
307 || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
308 && l->l_ns != LM_ID_BASE))
309 __morecore = __failing_morecore;
310 #endif
312 thread_arena = &main_arena;
314 #if HAVE_TUNABLES
315 /* Ensure initialization/consolidation and do it under a lock so that a
316 thread attempting to use the arena in parallel waits on us till we
317 finish. */
318 __libc_lock_lock (main_arena.mutex);
319 malloc_consolidate (&main_arena);
321 TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
322 TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
323 TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
324 TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
325 TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
326 TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
327 TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
328 TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
329 #if USE_TCACHE
330 TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
331 TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
332 TUNABLE_GET (tcache_unsorted_limit, size_t,
333 TUNABLE_CALLBACK (set_tcache_unsorted_limit));
334 #endif
335 __libc_lock_unlock (main_arena.mutex);
336 #else
337 const char *s = NULL;
338 if (__glibc_likely (_environ != NULL))
340 char **runp = _environ;
341 char *envline;
343 while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
346 size_t len = strcspn (envline, "=");
348 if (envline[len] != '=')
349 /* This is a "MALLOC_" variable at the end of the string
350 without a '=' character. Ignore it since otherwise we
351 will access invalid memory below. */
352 continue;
354 switch (len)
356 case 6:
357 if (memcmp (envline, "CHECK_", 6) == 0)
358 s = &envline[7];
359 break;
360 case 8:
361 if (!__builtin_expect (__libc_enable_secure, 0))
363 if (memcmp (envline, "TOP_PAD_", 8) == 0)
364 __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
365 else if (memcmp (envline, "PERTURB_", 8) == 0)
366 __libc_mallopt (M_PERTURB, atoi (&envline[9]));
368 break;
369 case 9:
370 if (!__builtin_expect (__libc_enable_secure, 0))
372 if (memcmp (envline, "MMAP_MAX_", 9) == 0)
373 __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
374 else if (memcmp (envline, "ARENA_MAX", 9) == 0)
375 __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
377 break;
378 case 10:
379 if (!__builtin_expect (__libc_enable_secure, 0))
381 if (memcmp (envline, "ARENA_TEST", 10) == 0)
382 __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
384 break;
385 case 15:
386 if (!__builtin_expect (__libc_enable_secure, 0))
388 if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
389 __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
390 else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
391 __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
393 break;
394 default:
395 break;
399 if (s && s[0] != '\0' && s[0] != '0')
400 __malloc_check_init ();
401 #endif
403 #if HAVE_MALLOC_INIT_HOOK
404 void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
405 if (hook != NULL)
406 (*hook)();
407 #endif
408 __malloc_initialized = 1;
411 /* Managing heaps and arenas (for concurrent threads) */
413 #if MALLOC_DEBUG > 1
415 /* Print the complete contents of a single heap to stderr. */
417 static void
418 dump_heap (heap_info *heap)
420 char *ptr;
421 mchunkptr p;
423 fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
424 ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
425 (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
426 p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
427 ~MALLOC_ALIGN_MASK);
428 for (;; )
430 fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
431 if (p == top (heap->ar_ptr))
433 fprintf (stderr, " (top)\n");
434 break;
436 else if (p->size == (0 | PREV_INUSE))
438 fprintf (stderr, " (fence)\n");
439 break;
441 fprintf (stderr, "\n");
442 p = next_chunk (p);
445 #endif /* MALLOC_DEBUG > 1 */
447 /* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
448 addresses as opposed to increasing, new_heap would badly fragment the
449 address space. In that case remember the second HEAP_MAX_SIZE part
450 aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
451 call (if it is already aligned) and try to reuse it next time. We need
452 no locking for it, as kernel ensures the atomicity for us - worst case
453 we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
454 multiple threads, but only one will succeed. */
455 static char *aligned_heap_area;
457 /* Create a new heap. size is automatically rounded up to a multiple
458 of the page size. */
460 static heap_info *
461 internal_function
462 new_heap (size_t size, size_t top_pad)
464 size_t pagesize = GLRO (dl_pagesize);
465 char *p1, *p2;
466 unsigned long ul;
467 heap_info *h;
469 if (size + top_pad < HEAP_MIN_SIZE)
470 size = HEAP_MIN_SIZE;
471 else if (size + top_pad <= HEAP_MAX_SIZE)
472 size += top_pad;
473 else if (size > HEAP_MAX_SIZE)
474 return 0;
475 else
476 size = HEAP_MAX_SIZE;
477 size = ALIGN_UP (size, pagesize);
479 /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
480 No swap space needs to be reserved for the following large
481 mapping (on Linux, this is the case for all non-writable mappings
482 anyway). */
483 p2 = MAP_FAILED;
484 if (aligned_heap_area)
486 p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
487 MAP_NORESERVE);
488 aligned_heap_area = NULL;
489 if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
491 __munmap (p2, HEAP_MAX_SIZE);
492 p2 = MAP_FAILED;
495 if (p2 == MAP_FAILED)
497 p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
498 if (p1 != MAP_FAILED)
500 p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
501 & ~(HEAP_MAX_SIZE - 1));
502 ul = p2 - p1;
503 if (ul)
504 __munmap (p1, ul);
505 else
506 aligned_heap_area = p2 + HEAP_MAX_SIZE;
507 __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
509 else
511 /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
512 is already aligned. */
513 p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
514 if (p2 == MAP_FAILED)
515 return 0;
517 if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
519 __munmap (p2, HEAP_MAX_SIZE);
520 return 0;
524 if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
526 __munmap (p2, HEAP_MAX_SIZE);
527 return 0;
529 h = (heap_info *) p2;
530 h->size = size;
531 h->mprotect_size = size;
532 LIBC_PROBE (memory_heap_new, 2, h, h->size);
533 return h;
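/* A standalone sketch of the alignment trick new_heap falls back to
   when aligned_heap_area has nothing cached: reserve twice
   HEAP_MAX_SIZE with PROT_NONE, keep the aligned middle and unmap the
   slack on both sides.  The constant and the explicit mmap flags are
   hypothetical; arena.c goes through its MMAP wrapper instead. */
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

#define EX_HEAP_MAX (1UL << 20)             /* must be a power of two */

static void *
ex_map_aligned_heap (void)
{
  char *p1 = mmap (NULL, EX_HEAP_MAX << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;

  /* Round up to the next EX_HEAP_MAX boundary inside the mapping. */
  char *p2 = (char *) (((uintptr_t) p1 + (EX_HEAP_MAX - 1))
                       & ~(EX_HEAP_MAX - 1));
  size_t lead = (size_t) (p2 - p1);

  if (lead != 0)
    munmap (p1, lead);                              /* leading slack */
  munmap (p2 + EX_HEAP_MAX, EX_HEAP_MAX - lead);    /* trailing slack */
  return p2;
}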
536 /* Grow a heap. size is automatically rounded up to a
537 multiple of the page size. */
539 static int
540 grow_heap (heap_info *h, long diff)
542 size_t pagesize = GLRO (dl_pagesize);
543 long new_size;
545 diff = ALIGN_UP (diff, pagesize);
546 new_size = (long) h->size + diff;
547 if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
548 return -1;
550 if ((unsigned long) new_size > h->mprotect_size)
552 if (__mprotect ((char *) h + h->mprotect_size,
553 (unsigned long) new_size - h->mprotect_size,
554 PROT_READ | PROT_WRITE) != 0)
555 return -2;
557 h->mprotect_size = new_size;
560 h->size = new_size;
561 LIBC_PROBE (memory_heap_more, 2, h, h->size);
562 return 0;
565 /* Shrink a heap. */
567 static int
568 shrink_heap (heap_info *h, long diff)
570 long new_size;
572 new_size = (long) h->size - diff;
573 if (new_size < (long) sizeof (*h))
574 return -1;
576 /* Try to re-map the extra heap space freshly to save memory, and make it
577 inaccessible. See malloc-sysdep.h to know when this is true. */
578 if (__glibc_unlikely (check_may_shrink_heap ()))
580 if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
581 MAP_FIXED) == (char *) MAP_FAILED)
582 return -2;
584 h->mprotect_size = new_size;
586 else
587 __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
588 /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
590 h->size = new_size;
591 LIBC_PROBE (memory_heap_less, 2, h, h->size);
592 return 0;
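/* A standalone sketch of the commit/decommit scheme grow_heap and
   shrink_heap apply to an already reserved PROT_NONE heap: mprotect
   makes more pages usable, madvise(MADV_DONTNEED) hands the unused
   tail back (the mmap/PROT_NONE branch above covers systems where
   shrinking must really remap).  Names are hypothetical and both
   offsets are assumed page aligned, as in the callers above. */
#include <stddef.h>
#include <sys/mman.h>

static int
ex_heap_commit (char *heap, size_t old_committed, size_t new_committed)
{
  return mprotect (heap + old_committed, new_committed - old_committed,
                   PROT_READ | PROT_WRITE);
}

static int
ex_heap_decommit (char *heap, size_t new_size, size_t unused_bytes)
{
  return madvise (heap + new_size, unused_bytes, MADV_DONTNEED);
}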
595 /* Delete a heap. */
597 #define delete_heap(heap) \
598 do { \
599 if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
600 aligned_heap_area = NULL; \
601 __munmap ((char *) (heap), HEAP_MAX_SIZE); \
602 } while (0)
604 static int
605 internal_function
606 heap_trim (heap_info *heap, size_t pad)
608 mstate ar_ptr = heap->ar_ptr;
609 unsigned long pagesz = GLRO (dl_pagesize);
610 mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
611 heap_info *prev_heap;
612 long new_size, top_size, top_area, extra, prev_size, misalign;
614 /* Can this heap go away completely? */
615 while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
617 prev_heap = heap->prev;
618 prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
619 p = chunk_at_offset (prev_heap, prev_size);
620 /* fencepost must be properly aligned. */
621 misalign = ((long) p) & MALLOC_ALIGN_MASK;
622 p = chunk_at_offset (prev_heap, prev_size - misalign);
623 assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
624 p = prev_chunk (p);
625 new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
626 assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
627 if (!prev_inuse (p))
628 new_size += prev_size (p);
629 assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
630 if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
631 break;
632 ar_ptr->system_mem -= heap->size;
633 LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
634 delete_heap (heap);
635 heap = prev_heap;
636 if (!prev_inuse (p)) /* consolidate backward */
638 p = prev_chunk (p);
639 unlink (ar_ptr, p, bck, fwd);
641 assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
642 assert (((char *) p + new_size) == ((char *) heap + heap->size));
643 top (ar_ptr) = top_chunk = p;
644 set_head (top_chunk, new_size | PREV_INUSE);
645 /*check_chunk(ar_ptr, top_chunk);*/
648 /* Uses similar logic for per-thread arenas as the main arena with systrim
649 and _int_free by preserving the top pad and rounding down to the nearest
650 page. */
651 top_size = chunksize (top_chunk);
652 if ((unsigned long)(top_size) <
653 (unsigned long)(mp_.trim_threshold))
654 return 0;
656 top_area = top_size - MINSIZE - 1;
657 if (top_area < 0 || (size_t) top_area <= pad)
658 return 0;
660 /* Release in pagesize units and round down to the nearest page. */
661 extra = ALIGN_DOWN(top_area - pad, pagesz);
662 if (extra == 0)
663 return 0;
665 /* Try to shrink. */
666 if (shrink_heap (heap, extra) != 0)
667 return 0;
669 ar_ptr->system_mem -= extra;
671 /* Success. Adjust top accordingly. */
672 set_head (top_chunk, (top_size - extra) | PREV_INUSE);
673 /*check_chunk(ar_ptr, top_chunk);*/
674 return 1;
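/* A small arithmetic sketch of the trim decision above, with made-up
   numbers: keep MINSIZE plus the caller's pad, then release only whole
   pages.  The helper mirrors the top_area/extra computation; the
   concrete figures are hypothetical. */
#include <assert.h>

static long
ex_trim_extra (long top_size, long minsize, long pad, long pagesz)
{
  long top_area = top_size - minsize - 1;
  if (top_area <= pad)
    return 0;
  return (top_area - pad) & ~(pagesz - 1);   /* ALIGN_DOWN to a page */
}

static void
ex_trim_example (void)
{
  /* 300 KiB of top chunk, MINSIZE 32, 128 KiB pad, 4 KiB pages:
     307167 usable - 131072 pad = 176095, rounded down to 42 pages. */
  assert (ex_trim_extra (300 * 1024, 32, 128 * 1024, 4096) == 42 * 4096);
}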
677 /* Create a new arena with initial size "size". */
679 /* If REPLACED_ARENA is not NULL, detach it from this thread. Must be
680 called while free_list_lock is held. */
681 static void
682 detach_arena (mstate replaced_arena)
684 if (replaced_arena != NULL)
686 assert (replaced_arena->attached_threads > 0);
687 /* The current implementation only detaches from main_arena in
688 case of allocation failure. This means that it is likely not
689 beneficial to put the arena on free_list even if the
690 reference count reaches zero. */
691 --replaced_arena->attached_threads;
695 static mstate
696 _int_new_arena (size_t size)
698 mstate a;
699 heap_info *h;
700 char *ptr;
701 unsigned long misalign;
703 h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
704 mp_.top_pad);
705 if (!h)
707 /* Maybe size is too large to fit in a single heap. So, just try
708 to create a minimally-sized arena and let _int_malloc() attempt
709 to deal with the large request via mmap_chunk(). */
710 h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
711 if (!h)
712 return 0;
714 a = h->ar_ptr = (mstate) (h + 1);
715 malloc_init_state (a);
716 a->attached_threads = 1;
717 /*a->next = NULL;*/
718 a->system_mem = a->max_system_mem = h->size;
720 /* Set up the top chunk, with proper alignment. */
721 ptr = (char *) (a + 1);
722 misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
723 if (misalign > 0)
724 ptr += MALLOC_ALIGNMENT - misalign;
725 top (a) = (mchunkptr) ptr;
726 set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
728 LIBC_PROBE (memory_arena_new, 2, a, size);
729 mstate replaced_arena = thread_arena;
730 thread_arena = a;
731 __libc_lock_init (a->mutex);
733 __libc_lock_lock (list_lock);
735 /* Add the new arena to the global list. */
736 a->next = main_arena.next;
737 /* FIXME: The barrier is an attempt to synchronize with read access
738 in reused_arena, which does not acquire list_lock while
739 traversing the list. */
740 atomic_write_barrier ();
741 main_arena.next = a;
743 __libc_lock_unlock (list_lock);
745 __libc_lock_lock (free_list_lock);
746 detach_arena (replaced_arena);
747 __libc_lock_unlock (free_list_lock);
749 /* Lock this arena. NB: Another thread may have been attached to
750 this arena because the arena is now accessible from the
751 main_arena.next list and could have been picked by reused_arena.
752 This can only happen for the last arena created (before the arena
753 limit is reached). At this point, some arena has to be attached
754 to two threads. We could acquire the arena lock before list_lock
755 to make it less likely that reused_arena picks this new arena,
756 but this could result in a deadlock with
757 __malloc_fork_lock_parent. */
759 __libc_lock_lock (a->mutex);
761 return a;
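/* A small sketch of the alignment fix-up above: the top chunk starts
   right behind the malloc_state, then is advanced so that the user
   pointer (2 * SIZE_SZ past the chunk header) lands on a
   MALLOC_ALIGNMENT boundary.  LP64-style constants, purely
   illustrative. */
#include <stdint.h>

#define EX_SIZE_SZ          8
#define EX_MALLOC_ALIGNMENT 16
#define EX_ALIGN_MASK       (EX_MALLOC_ALIGNMENT - 1)

static char *
ex_align_top (char *ptr)
{
  uintptr_t misalign = ((uintptr_t) ptr + 2 * EX_SIZE_SZ) & EX_ALIGN_MASK;
  if (misalign > 0)
    ptr += EX_MALLOC_ALIGNMENT - misalign;
  return ptr;
}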
765 /* Remove an arena from free_list. */
766 static mstate
767 get_free_list (void)
769 mstate replaced_arena = thread_arena;
770 mstate result = free_list;
771 if (result != NULL)
773 __libc_lock_lock (free_list_lock);
774 result = free_list;
775 if (result != NULL)
777 free_list = result->next_free;
779 /* The arena will be attached to this thread. */
780 assert (result->attached_threads == 0);
781 result->attached_threads = 1;
783 detach_arena (replaced_arena);
785 __libc_lock_unlock (free_list_lock);
787 if (result != NULL)
789 LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
790 __libc_lock_lock (result->mutex);
791 thread_arena = result;
795 return result;
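/* A standalone sketch of the locking pattern in get_free_list: peek at
   the list head without the lock, and only take free_list_lock and
   re-read when the peek looked promising.  Node type and globals are
   hypothetical. */
#include <pthread.h>
#include <stddef.h>

struct ex_node
{
  struct ex_node *next_free;
};

static pthread_mutex_t ex_free_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_node *ex_free_list;

static struct ex_node *
ex_pop_free (void)
{
  struct ex_node *result = ex_free_list;    /* unlocked peek */
  if (result == NULL)
    return NULL;

  pthread_mutex_lock (&ex_free_list_lock);
  result = ex_free_list;                    /* re-read under the lock */
  if (result != NULL)
    ex_free_list = result->next_free;
  pthread_mutex_unlock (&ex_free_list_lock);
  return result;
}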
798 /* Remove the arena from the free list (if it is present).
799 free_list_lock must have been acquired by the caller. */
800 static void
801 remove_from_free_list (mstate arena)
803 mstate *previous = &free_list;
804 for (mstate p = free_list; p != NULL; p = p->next_free)
806 assert (p->attached_threads == 0);
807 if (p == arena)
809 /* Remove the requested arena from the list. */
810 *previous = p->next_free;
811 break;
813 else
814 previous = &p->next_free;
818 /* Lock and return an arena that can be reused for memory allocation.
819 Avoid AVOID_ARENA as we have already failed to allocate memory in
820 it and it is currently locked. */
821 static mstate
822 reused_arena (mstate avoid_arena)
824 mstate result;
825 /* FIXME: Access to next_to_use suffers from data races. */
826 static mstate next_to_use;
827 if (next_to_use == NULL)
828 next_to_use = &main_arena;
830 /* Iterate over all arenas (including those linked from
831 free_list). */
832 result = next_to_use;
835 if (!arena_is_corrupt (result) && !__libc_lock_trylock (result->mutex))
836 goto out;
838 /* FIXME: This is a data race, see _int_new_arena. */
839 result = result->next;
841 while (result != next_to_use);
843 /* Avoid AVOID_ARENA as we have already failed to allocate memory
844 in that arena and it is currently locked. */
845 if (result == avoid_arena)
846 result = result->next;
848 /* Make sure that the arena we get is not corrupted. */
849 mstate begin = result;
850 while (arena_is_corrupt (result) || result == avoid_arena)
852 result = result->next;
853 if (result == begin)
854 /* We looped around the arena list. We could not find any
855 arena that was either not corrupted or not the one we
856 wanted to avoid. */
857 return NULL;
860 /* No arena available without contention. Wait for the next in line. */
861 LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
862 __libc_lock_lock (result->mutex);
864 out:
865 /* Attach the arena to the current thread. */
867 /* Update the arena thread attachment counters. */
868 mstate replaced_arena = thread_arena;
869 __libc_lock_lock (free_list_lock);
870 detach_arena (replaced_arena);
872 /* We may have picked up an arena on the free list. We need to
873 preserve the invariant that no arena on the free list has a
874 positive attached_threads counter (otherwise,
875 arena_thread_freeres cannot use the counter to determine if the
876 arena needs to be put on the free list). We unconditionally
877 remove the selected arena from the free list. The caller of
878 reused_arena checked the free list and observed it to be empty,
879 so the list is very short. */
880 remove_from_free_list (result);
882 ++result->attached_threads;
884 __libc_lock_unlock (free_list_lock);
887 LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
888 thread_arena = result;
889 next_to_use = result->next;
891 return result;
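/* A standalone sketch of the scan in reused_arena: walk the circular
   arena list with trylock so an uncontended arena is picked up without
   blocking, and only wait on a mutex once a full pass found every
   arena busy.  Types and names are hypothetical. */
#include <pthread.h>

struct ex_rr_arena
{
  pthread_mutex_t mutex;
  struct ex_rr_arena *next;     /* circularly linked, as in arena.c */
};

static struct ex_rr_arena *
ex_reuse_arena (struct ex_rr_arena *start)
{
  struct ex_rr_arena *a = start;
  do
    {
      if (pthread_mutex_trylock (&a->mutex) == 0)
        return a;               /* uncontended: take it */
      a = a->next;
    }
  while (a != start);

  /* Every arena is locked right now: queue up on the first one. */
  pthread_mutex_lock (&start->mutex);
  return start;
}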
894 static mstate
895 internal_function
896 arena_get2 (size_t size, mstate avoid_arena)
898 mstate a;
900 static size_t narenas_limit;
902 a = get_free_list ();
903 if (a == NULL)
905 /* Nothing immediately available, so generate a new arena. */
906 if (narenas_limit == 0)
908 if (mp_.arena_max != 0)
909 narenas_limit = mp_.arena_max;
910 else if (narenas > mp_.arena_test)
912 int n = __get_nprocs ();
914 if (n >= 1)
915 narenas_limit = NARENAS_FROM_NCORES (n);
916 else
917 /* We have no information about the system. Assume two
918 cores. */
919 narenas_limit = NARENAS_FROM_NCORES (2);
922 repeat:;
923 size_t n = narenas;
924 /* NB: the following depends on the fact that (size_t)0 - 1 is a
925 very large number and that the underflow is OK. If arena_max
926 is set the value of arena_test is irrelevant. If arena_test
927 is set but narenas is not yet larger or equal to arena_test
928 narenas_limit is 0. There is no possibility for narenas to
929 be too big for the test to always fail since there is not
930 enough address space to create that many arenas. */
931 if (__glibc_unlikely (n <= narenas_limit - 1))
933 if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
934 goto repeat;
935 a = _int_new_arena (size);
936 if (__glibc_unlikely (a == NULL))
937 catomic_decrement (&narenas);
939 else
940 a = reused_arena (avoid_arena);
942 return a;
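/* A small standalone check of the size_t underflow described above:
   while narenas_limit is still 0, narenas_limit - 1 wraps to SIZE_MAX,
   so the "still below the limit" test always passes and new arenas can
   be created; once a real limit is in place the comparison behaves
   normally.  Helper name is hypothetical. */
#include <assert.h>
#include <stddef.h>

static int
ex_below_arena_limit (size_t narenas, size_t narenas_limit)
{
  return narenas <= narenas_limit - 1;      /* wraps when limit == 0 */
}

static void
ex_arena_limit_example (void)
{
  assert (ex_below_arena_limit (7, 8));     /* room for one more */
  assert (!ex_below_arena_limit (8, 8));    /* limit reached */
  assert (ex_below_arena_limit (1000, 0));  /* 0 - 1 == SIZE_MAX */
}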
945 /* If we don't have the main arena, then maybe the failure is due to running
946 out of mmapped areas, so we can try allocating on the main arena.
947 Otherwise, it is likely that sbrk() has failed and there is still a chance
948 to mmap(), so try one of the other arenas. */
949 static mstate
950 arena_get_retry (mstate ar_ptr, size_t bytes)
952 LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
953 if (ar_ptr != &main_arena)
955 __libc_lock_unlock (ar_ptr->mutex);
956 /* Don't touch the main arena if it is corrupt. */
957 if (arena_is_corrupt (&main_arena))
958 return NULL;
960 ar_ptr = &main_arena;
961 __libc_lock_lock (ar_ptr->mutex);
963 else
965 __libc_lock_unlock (ar_ptr->mutex);
966 ar_ptr = arena_get2 (bytes, ar_ptr);
969 return ar_ptr;
972 static void __attribute__ ((section ("__libc_thread_freeres_fn")))
973 arena_thread_freeres (void)
975 mstate a = thread_arena;
976 thread_arena = NULL;
978 if (a != NULL)
980 __libc_lock_lock (free_list_lock);
981 /* If this was the last attached thread for this arena, put the
982 arena on the free list. */
983 assert (a->attached_threads > 0);
984 if (--a->attached_threads == 0)
986 a->next_free = free_list;
987 free_list = a;
989 __libc_lock_unlock (free_list_lock);
992 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
994 /*
995  * Local variables:
996  * c-basic-offset: 2
997  * End:
998  */