/*
 * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
 *
 * Copyright (c) 2003,2004,2009,2010-2019 The DragonFly Project,
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and by
 * Venkatesh Srinivas <me@endeavour.zapto.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
 */
/*
 * This module implements a slab allocator drop-in replacement for the
 * libc malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and overhead losses are limited to a fixed
 * worst-case amount.
 *
 * The slab allocator does not have to pre-initialize the list of
 * free chunks for each zone, and the underlying VM will not be
 * touched at all beyond the zone header until an actual allocation
 * needs it.
 *
 * Slab management and locking are done on a per-zone basis.
 *
 *	Alloc Size	Chunking	Number of zones
 *
 *	Allocations >= ZoneLimit go directly to mmap and a hash table
 *	is used to locate them for free().  One- and two-page allocations
 *	use the zone mechanic to avoid excessive mmap()/munmap() calls.
 *
 *			API FEATURES AND SIDE EFFECTS
 *
 *	+ power-of-2 sized allocations up to a page will be power-of-2
 *	  aligned.  Above that, power-of-2 sized allocations are
 *	  page-aligned.  Non power-of-2 sized allocations are aligned
 *	  the same as the chunk size for their zone.
 *	+ malloc(0) returns a special non-NULL value
 *	+ ability to allocate arbitrarily large chunks of memory
 *	+ realloc will reuse the passed pointer if possible, within the
 *	  limitations of the zone chunking.
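 *
 *	Illustrative example (not part of the original notes): per the
 *	alignment guarantees above, a caller may rely on, e.g.,
 *
 *		void *p = malloc(256);    (power-of-2 <= page: 256-byte aligned)
 *		void *q = malloc(4096);   (page-sized power-of-2: page-aligned)
 *		void *r = malloc(0);      (special non-NULL pointer; free()able)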
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'.  See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
 *	USENIX Technical Conference. USENIX Association.
 *
 * Oversized allocations employ the BIGCACHE mechanic whereby large
 * allocations may be handed significantly larger buffers, allowing them
 * to avoid mmap/munmap operations even through significant realloc()s.
 * The excess space is only trimmed if too many large allocations have been
 * given this treatment.
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 * 'U' / ['u']	Generate / do not generate utrace entries for ktrace(1).
 *		This will generate utrace events for all malloc,
 *		realloc, and free calls.  There are tools which can
 *		interpret these logs to replay an allocation pattern
 *		(mtrplay) or to graph heap structure (mtrgraph).
 * 'Z' / ['z']	Zero out / do not zero all allocations.
 *		Each new byte of memory allocated by malloc, realloc, or
 *		reallocf will be initialized to 0.  This is intended for
 *		debugging and will affect performance negatively.
 * 'H' / ['h']	Pass / do not pass a hint to the kernel about pages unused
 *		by the allocation functions.
 */
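/*
 * Usage sketch (illustrative, not from the original sources): enabling
 * allocation zeroing and utrace logging for one run from the shell, then
 * dumping the trace records:
 *
 *	$ env MALLOC_OPTIONS=ZU ktrace -t u ./a.out
 *	$ kdump -t u
 *
 * The option string is ignored for set-user-ID/set-group-ID programs; see
 * malloc_init() below, which checks issetugid() before calling getenv().
 */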
/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
#include "namespace.h"
#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/ktrace.h>
#include <errno.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <machine/atomic.h>
#include "un-namespace.h"

#include "libc_private.h"
#include "spinlock.h"
void *__malloc(size_t);
void *__calloc(size_t, size_t);
void *__realloc(void *, size_t);
void *__aligned_alloc(size_t, size_t);
void __free(void *);
int __posix_memalign(void **, size_t, size_t);
/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc *next;	/* hash link */
	void	*base;		/* base pointer */
	u_long	active;		/* bytes active */
	u_long	bytes;		/* bytes allocated */
} *bigalloc_t;
/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define MAX_SLAB_PAGEALIGN	(2 * PAGE_SIZE)	/* max slab for PAGE_SIZE*n */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_ZONE_SIZE	(64 * 1024)	/* zone size */
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */

#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif
/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk *c_Next;
} *slchunk_t;
/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;
	int		z_Flags;
	struct slchunk	*z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
} *slzone_t;

typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
} slglobaldata_t;

#define SLZF_UNOTZEROD		0x0001

#define FASTSLABREALLOC		0x02
/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 *	    not be larger than 64.
 */
#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)
/*
 * BIGCACHE caches oversized allocations.  Note that a linear search is
 * performed, so do not make the cache too large.
 *
 * BIGCACHE will garbage-collect excess space when the excess exceeds the
 * specified value.  A relatively large number should be used here because
 * garbage collection is expensive.
 */
#define BIGCACHE	16
#define BIGCACHE_MASK	(BIGCACHE - 1)
#define BIGCACHE_LIMIT	(1024 * 1024)		/* size limit */
#define BIGCACHE_EXCESS	(16 * 1024 * 1024)	/* garbage collect */

#define CACHE_CHUNKS	32

#define SAFLAG_ZERO	0x0001
#define SAFLAG_PASSIVE	0x0002
#define SAFLAG_MAGS	0x0004

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
/*
 * The assertion macros try to pretty-print assertion failures
 * which can be caused by corruption.  If a lock is held, we
 * provide a macro that attempts to release it before asserting
 * in order to prevent (e.g.) a reentrant SIGABRT calling malloc
 * and deadlocking, resulting in the program freezing up.
 */
#define MASSERT(exp)						\
	do { if (__predict_false(!(exp)))			\
		_mpanic("assertion: %s in %s",			\
			#exp, __func__);			\
	} while(0)

#define MASSERT_WTHUNLK(exp, unlk)				\
	do { if (__predict_false(!(exp))) {			\
		unlk;						\
		_mpanic("assertion: %s in %s",			\
			#exp, __func__);			\
	    }							\
	} while(0)
/*
 * Magazines, arranged so the structure is roughly 4KB.
 */
#define M_MAX_ROUNDS		(512 - 3)
#define M_MIN_ROUNDS		16
#define M_ZONE_INIT_ROUNDS	64
#define M_ZONE_HYSTERESIS	32
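/*
 * Size check (illustrative): on an LP64 system the objects[] array holds
 * (512 - 3) * sizeof(void *) = 509 * 8 = 4072 bytes; the list linkage and
 * the integer bookkeeping fields bring the structure to just about one
 * 4KB page, which is the "roughly 4KB" target mentioned above.
 */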
struct magazine {
	SLIST_ENTRY(magazine) nextmagazine;

	int		capacity;	/* Max rounds in this magazine */
	int		rounds;		/* Current number of free rounds */
	void		*objects[M_MAX_ROUNDS];
};

SLIST_HEAD(magazinelist, magazine);

static spinlock_t zone_mag_lock;
static spinlock_t depot_spinlock;
static struct magazine zone_magazine = {
	.capacity = M_ZONE_INIT_ROUNDS,
};

#define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
#define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
#define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
#define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)
/*
 * Each thread will have a pair of magazines per size-class (NZONES).
 * The loaded magazine will support immediate allocations; the previous
 * magazine will either be full or empty and can be swapped in as needed.
 */
typedef struct magazine_pair {
	struct magazine	*loaded;
	struct magazine	*prev;
} magazine_pair;

/* A depot is a collection of magazines for a single zone. */
typedef struct magazine_depot {
	struct magazinelist full;
	struct magazinelist empty;
} magazine_depot;

typedef struct thr_mags {
	magazine_pair	mags[NZONES];
	struct magazine	*newmag;
	int		init;
} thr_mags;

static __thread thr_mags thread_mags TLS_ATTRIBUTE;
static pthread_key_t thread_mags_key;
static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
static magazine_depot depots[NZONES];
/*
 * Fixed globals (not per-cpu)
 */
static const int ZoneSize = ZALLOC_ZONE_SIZE;
static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;

static int opt_madvise = 0;
static int opt_utrace = 0;
static int g_malloc_flags = 0;
static struct slglobaldata SLGlobalData;
static bigalloc_t bigalloc_array[BIGHSIZE];
static spinlock_t bigspin_array[BIGXSIZE];
static volatile void *bigcache_array[BIGCACHE];		/* atomic swap */
static volatile size_t bigcache_size_array[BIGCACHE];	/* SMP races ok */
static volatile int bigcache_index;			/* SMP races ok */
static int malloc_panic;
static size_t excess_alloc;				/* excess big allocs */
static void *_slaballoc(size_t size, int flags);
static void *_slabrealloc(void *ptr, size_t size);
static void _slabfree(void *ptr, int, bigalloc_t *);
static int _slabmemalign(void **memptr, size_t alignment, size_t size);
static void *_vmem_alloc(size_t bytes, size_t align, int flags);
static void _vmem_free(void *ptr, size_t bytes);
static void *magazine_alloc(struct magazine *);
static int magazine_free(struct magazine *, void *);
static void *mtmagazine_alloc(int zi, int flags);
static int mtmagazine_free(int zi, void *);
static void mtmagazine_init(void);
static void mtmagazine_destructor(void *);
static slzone_t zone_alloc(int flags);
static void zone_free(void *z);
static void _mpanic(const char *ctl, ...) __printflike(1, 2);
static void malloc_init(void) __constructor(101);
struct nmalloc_utrace {
	void	*p;
	size_t	s;
	void	*r;
};

#define UTRACE(a, b, c)						\
	if (opt_utrace) {					\
		struct nmalloc_utrace ut = {			\
			.p = (a),				\
			.s = (b),				\
			.r = (c)				\
		};						\
		utrace(&ut, sizeof(ut));			\
	}
/*
 * Parse the MALLOC_OPTIONS environment variable and set the tuning flags.
 */
static void
malloc_init(void)
{
	const char *p = NULL;

	if (issetugid() == 0)
		p = getenv("MALLOC_OPTIONS");

	for (; p != NULL && *p != '\0'; p++) {
		switch(*p) {
		case 'u':	opt_utrace = 0; break;
		case 'U':	opt_utrace = 1; break;
		case 'h':	opt_madvise = 0; break;
		case 'H':	opt_madvise = 1; break;
		case 'z':	g_malloc_flags = 0; break;
		case 'Z':	g_malloc_flags = SAFLAG_ZERO; break;
		default:
			break;
		}
	}

	UTRACE((void *) -1, 0, NULL);
}
/*
 * We have to install a handler for nmalloc thread teardowns when
 * the thread is created.  We cannot delay this because destructors in
 * sophisticated userland programs can call malloc() for the first time
 * during their thread exit.
 *
 * This routine is called directly from pthreads.
 */
void
_nmalloc_thr_init(void)
{
	thr_mags *tp = &thread_mags;

	/*
	 * Disallow mtmagazine operations until the mtmagazine is
	 * fully initialized.
	 */
	tp->init = -1;

	_pthread_once(&thread_mags_once, mtmagazine_init);
	_pthread_setspecific(thread_mags_key, tp);
	tp->init = 1;
}
void
_nmalloc_thr_prepfork(void)
{
	_SPINLOCK(&zone_mag_lock);
	_SPINLOCK(&depot_spinlock);
}

void
_nmalloc_thr_parentfork(void)
{
	_SPINUNLOCK(&depot_spinlock);
	_SPINUNLOCK(&zone_mag_lock);
}

void
_nmalloc_thr_childfork(void)
{
	_SPINUNLOCK(&depot_spinlock);
	_SPINUNLOCK(&zone_mag_lock);
}
/*
 * Handle signal reentrancy safely whether we are threaded or not.
 * This improves stability for mono and will probably improve
 * stability for other high-level languages which are becoming increasingly
 * popular.
 *
 * The sigblockall()/sigunblockall() implementation uses a counter on
 * a per-thread shared user/kernel page, avoids system calls, and is thus
 * very fast.
 */
static __inline void
nmalloc_sigblockall(void)
{
	sigblockall();
}

static __inline void
nmalloc_sigunblockall(void)
{
	sigunblockall();
}
static __inline void
slgd_lock(slglobaldata_t slgd)
{
	_SPINLOCK(&slgd->Spinlock);
}

static __inline void
slgd_unlock(slglobaldata_t slgd)
{
	_SPINUNLOCK(&slgd->Spinlock);
}

static __inline void
depot_lock(magazine_depot *dp __unused)
{
	_SPINLOCK(&depot_spinlock);
}

static __inline void
depot_unlock(magazine_depot *dp __unused)
{
	_SPINUNLOCK(&depot_spinlock);
}

static __inline void
zone_magazine_lock(void)
{
	_SPINLOCK(&zone_mag_lock);
}

static __inline void
zone_magazine_unlock(void)
{
	_SPINUNLOCK(&zone_mag_lock);
}
static __inline void
swap_mags(magazine_pair *mp)
{
	struct magazine *tmp;

	tmp = mp->loaded;
	mp->loaded = mp->prev;
	mp->prev = tmp;
}
/*
 * bigalloc hashing and locking support.
 *
 * Return an unmasked hash code for the passed pointer.
 */
static __inline int
_bigalloc_hash(void *ptr)
{
	int hv;

	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
	     ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));

	return(hv);
}
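/*
 * Worked example (illustrative): with 4KB pages (PAGE_SHIFT == 12) and
 * BIGHSHIFT == 10, the hash mixes a pointer's page number with the page
 * number shifted down another 10 bits.  The chain searched is
 * bigalloc_array[hv & BIGHMASK] and the lock taken is
 * bigspin_array[hv & BIGXMASK]; since BIGXSIZE is BIGHSIZE / 16, each
 * spinlock covers 16 adjacent hash chains.
 */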
/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 */
static __inline bigalloc_t *
bigalloc_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}
/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 *
 * BUT, if the hash chain is empty, just return NULL and do not bother
 * to lock anything.
 */
static __inline bigalloc_t *
bigalloc_check_and_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (*bigp == NULL)
		return(NULL);
	_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}
static __inline void
bigalloc_unlock(void *ptr)
{
	int hv;

	hv = _bigalloc_hash(ptr);
	_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
}
/*
 * Find a bigcache entry that might work for the allocation.  SMP races are
 * ok here except for the swap (that is, it is ok if bigcache_size_array[i]
 * is wrong or if a NULL or too-small big is returned).
 *
 * Generally speaking it is ok to find a large entry even if the bytes
 * requested are relatively small (but still oversized), because we really
 * don't know *what* the application is going to do with the buffer.
 */
static bigalloc_t
bigcache_find_alloc(size_t bytes)
{
	bigalloc_t big = NULL;
	size_t test;
	int i;

	for (i = 0; i < BIGCACHE; ++i) {
		test = bigcache_size_array[i];
		if (bytes <= test) {
			bigcache_size_array[i] = 0;
			big = atomic_swap_ptr(&bigcache_array[i], NULL);
			break;
		}
	}
	return big;
}
/*
 * Free a bigcache entry, possibly returning one that the caller really must
 * free.  This is used to cache recent oversized memory blocks.  Only
 * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
 * to collect the biggest ones we can that are under the limit.
 */
static bigalloc_t
bigcache_find_free(bigalloc_t big)
{
	int i;
	int j;
	int b;

	b = ++bigcache_index;
	for (i = 0; i < BIGCACHE; ++i) {
		j = (b + i) & BIGCACHE_MASK;
		if (bigcache_size_array[j] < big->bytes) {
			bigcache_size_array[j] = big->bytes;
			big = atomic_swap_ptr(&bigcache_array[j], big);
			break;
		}
	}
	return big;
}
/*
 * Garbage-collect excess space retained by oversized allocations once the
 * tracked excess crosses BIGCACHE_EXCESS.
 */
static void
handle_excess_big(void)
{
	bigalloc_t big;
	bigalloc_t *bigp;
	int i;

	if (excess_alloc <= BIGCACHE_EXCESS)
		return;

	for (i = 0; i < BIGHSIZE; ++i) {
		bigp = &bigalloc_array[i];
		_SPINLOCK(&bigspin_array[i & BIGXMASK]);
		for (big = *bigp; big; big = big->next) {
			if (big->active < big->bytes) {
				MASSERT_WTHUNLK((big->active & PAGE_MASK) == 0,
				    _SPINUNLOCK(&bigspin_array[i & BIGXMASK]));
				MASSERT_WTHUNLK((big->bytes & PAGE_MASK) == 0,
				    _SPINUNLOCK(&bigspin_array[i & BIGXMASK]));
				munmap((char *)big->base + big->active,
				       big->bytes - big->active);
				atomic_add_long(&excess_alloc,
						big->active - big->bytes);
				big->bytes = big->active;
			}
		}
		_SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
	}
}
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */

	/*
	 * This used to be 8-byte chunks and 16 zones for n < 128.
	 * However some instructions may require 16-byte alignment
	 * (aka SIMD) and programs might not request an aligned size
	 * (aka GCC-7), so change this as follows:
	 *
	 * 0-15 bytes	8-byte alignment in two zones	(0-1)
	 * 16-127 bytes	16-byte alignment in eight zones (3-10)
	 * zone index 2 and 11-15 are currently unused.
	 */
	if (n < 16) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);	/* 8 byte chunks, 2 zones */
		/* zones 0,1, zone 2 is unused */
	}
	if (n < 128) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 2);	/* 16 byte chunks, 8 zones */
		/* zones 3-10, zones 11-15 unused */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %zu", n);
	return(0);
}
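/*
 * Worked example (illustrative): a request for 100 bytes is rounded up to
 * 112 with 16-byte chunking and returns zone index 112/16 + 2 = 9; a
 * request for 3000 bytes is rounded up to 3072 with 256-byte chunking and
 * returns zone index 3072/256 + 39 = 51.
 */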
/*
 * We want large magazines for small allocations
 */
static __inline int
zonecapacity(int zi)
{
	int cap;

	cap = (NZONES - zi) * (M_MAX_ROUNDS - M_MIN_ROUNDS) / NZONES +
	      M_MIN_ROUNDS;

	return cap;
}
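/*
 * Worked example (illustrative): for the smallest size class (zi == 0) the
 * capacity works out to exactly M_MAX_ROUNDS (509 rounds), while the
 * largest size class (zi == NZONES - 1) gets only slightly more than
 * M_MIN_ROUNDS (16 rounds), so per-thread caching is deepest where objects
 * are cheapest.
 */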
/*
 * malloc() - call internal slab allocator
 */
void *
__malloc(size_t size)
{
	void *ptr;

	nmalloc_sigblockall();
	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	UTRACE(0, size, ptr);
	nmalloc_sigunblockall();

	return(ptr);
}

#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
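/*
 * Overflow-check sketch (illustrative): MUL_NO_OVERFLOW is 2^32 on LP64,
 * so if both calloc() operands are below it their product cannot overflow
 * a size_t and the division test is skipped.  Otherwise the exact test
 * "number > 0 && SIZE_MAX / number < size" is applied; e.g. on a 64-bit
 * system calloc(0x100000001, 0x100000001) trips the division test and the
 * request is rejected, while typical small requests never pay for a
 * division.
 */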
/*
 * calloc() - call internal slab allocator
 */
void *
__calloc(size_t number, size_t size)
{
	void *ptr;

	if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    number > 0 && SIZE_MAX / number < size) {
		errno = ENOMEM;
		return(NULL);
	}

	nmalloc_sigblockall();
	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	UTRACE(0, number * size, ptr);
	nmalloc_sigunblockall();

	return(ptr);
}
/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
__realloc(void *ptr, size_t size)
{
	void *ret;

	nmalloc_sigblockall();
	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	UTRACE(ptr, size, ret);
	nmalloc_sigunblockall();

	return(ret);
}
/*
 * aligned_alloc()
 *
 * Allocate (size) bytes with an alignment of (alignment).
 */
void *
__aligned_alloc(size_t alignment, size_t size)
{
	void *ptr = NULL;
	int rc;

	nmalloc_sigblockall();
	rc = _slabmemalign(&ptr, alignment, size);
	if (rc)
		errno = rc;
	nmalloc_sigunblockall();

	return(ptr);
}
/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 */
int
__posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int rc;

	/*
	 * OpenGroup spec issue 6 check
	 */
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	nmalloc_sigblockall();
	rc = _slabmemalign(memptr, alignment, size);
	nmalloc_sigunblockall();

	return(rc);
}
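/*
 * Usage sketch (illustrative): a caller requesting cache-line-aligned
 * storage through either entry point:
 *
 *	void *buf;
 *	if (posix_memalign(&buf, 64, 4096) == 0) {
 *		...use buf...
 *		free(buf);
 *	}
 *	buf = aligned_alloc(64, 4096);	(equivalent request)
 *
 * Note that posix_memalign() reports failure via its return value rather
 * than errno, per the OpenGroup specification referenced above.
 */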
/*
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
 * zone matching the requirements, and _vmem_alloc() otherwise.
 */
static int
_slabmemalign(void **memptr, size_t alignment, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t chunking;
	int zi;

	/*
	 * OpenGroup spec issue 6 checks
	 */
	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
		*memptr = NULL;
		return(EINVAL);
	}

	/*
	 * Our zone mechanism guarantees same-sized alignment for any
	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
	 * we can just call _slaballoc() and be done.  We round size up
	 * to the nearest alignment boundary to improve our odds of
	 * it becoming a power-of-2 if it wasn't before.
	 */
	if (size <= alignment)
		size = alignment;
	else
		size = (size + alignment - 1) & ~(size_t)(alignment - 1);

	/*
	 * If we have overflowed above when rounding to the nearest alignment
	 * boundary, just return ENOMEM, size should be == N * sizeof(void *).
	 *
	 * Power-of-2 allocations up to 8KB will be aligned to the allocation
	 * size and _slaballoc() can simply be used.  Please see line 1082
	 * for this special case: 'Align the storage in the zone based on
	 * the chunking' has a special case for powers of 2.
	 */
	if (size == 0)
		return(ENOMEM);

	if (size <= MAX_SLAB_PAGEALIGN &&
	    (size | (size - 1)) + 1 == (size << 1)) {
		*memptr = _slaballoc(size, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * Otherwise locate a zone with a chunking that matches
	 * the requested alignment, within reason.  Consider two cases:
	 *
	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
	 *     we find will be the best fit because the chunking will be
	 *     greater or equal to the alignment.
	 *
	 * (2) A 513 allocation on a 256-byte alignment.  In this case
	 *     the first zoneindex we find will be for 576 byte allocations
	 *     with a chunking of 64, which is not sufficient.  To fix this
	 *     we simply find the nearest power-of-2 >= size and use the
	 *     same side-effect of _slaballoc() which guarantees
	 *     same-alignment on a power-of-2 allocation.
	 */
	if (size < PAGE_SIZE) {
		zi = zoneindex(&size, &chunking);
		if (chunking >= alignment) {
			*memptr = _slaballoc(size, 0);
			return(*memptr ? 0 : ENOMEM);
		}
		while (alignment < size)
			alignment <<= 1;
		*memptr = _slaballoc(alignment, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * If the slab allocator cannot handle it use vmem_alloc().
	 *
	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	if (alignment == PAGE_SIZE && size <= BIGCACHE_LIMIT) {
		big = bigcache_find_alloc(size);
		if (big && big->bytes < size) {
			_slabfree(big->base, FASTSLABREALLOC, &big);
			big = NULL;
		}
		if (big) {
			*memptr = big->base;
			big->active = size;
			if (big->active < big->bytes) {
				atomic_add_long(&excess_alloc,
						big->bytes - big->active);
			}
			bigp = bigalloc_lock(*memptr);
			big->next = *bigp;
			*bigp = big;
			bigalloc_unlock(*memptr);
			handle_excess_big();
			return(0);
		}
	}
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->active = size;
	big->bytes = size;		/* no excess */
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}
/*
 * free() (SLAB ALLOCATOR) - do the obvious
 */
void
__free(void *ptr)
{
	UTRACE(ptr, 0, 0);

	nmalloc_sigblockall();
	_slabfree(ptr, 0, NULL);
	nmalloc_sigunblockall();
}
/*
 * _slaballoc()	(SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * kmem subsystem (bigalloc/_vmem_alloc).
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	thr_mags *tp;
	struct magazine *mp;
	bigalloc_t big;
	bigalloc_t *bigp;
	size_t chunking;
	size_t off;
	void *obj;
	int count;
	int zi;

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */

	/* Capture global flags */
	flags |= g_malloc_flags;
	/*
	 * Handle large allocations directly, with a separate bigmem cache.
	 *
	 * The backend allocator is pretty nasty on a SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 *
	 * NOTE: Please see _slabmemalign(), which assumes that power-of-2
	 *	 allocations up to and including MAX_SLAB_PAGEALIGN
	 *	 can use _slaballoc() and be aligned to the same.  The
	 *	 zone cache can be used for this case, bigalloc does not
	 *	 have to be.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > MAX_SLAB_PAGEALIGN)) {
		/*
		 * Page-align and cache-color in case of virtually indexed
		 * physically tagged L1 caches (aka SandyBridge).  No sweat
		 * otherwise, so just do it.
		 *
		 * (don't count as excess).
		 */
		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;

		/*
		 * If we have overflowed above when rounding to the page
		 * boundary, something has passed us (size_t)[-PAGE_MASK..-1]
		 * so just return NULL, size at this point should be >= 0.
		 */
		if (size == 0)
			return(NULL);

		/*
		 * Force an additional page offset for 8KB-aligned requests
		 * (i.e. 8KB, 16KB, etc) that helps spread data across the
		 * CPU caches at the cost of some dead space in the memory
		 * map.
		 */
		if ((size & (PAGE_SIZE * 2 - 1)) == 0)
			size += PAGE_SIZE;

		/*
		 * Try to reuse a cached big block to avoid mmap'ing.  If it
		 * turns out not to fit our requirements we throw it away
		 * and allocate normally.
		 */
		big = NULL;
		if (size <= BIGCACHE_LIMIT) {
			big = bigcache_find_alloc(size);
			if (big && big->bytes < size) {
				_slabfree(big->base, FASTSLABREALLOC, &big);
				big = NULL;
			}
		}
		if (big) {
			chunk = big->base;
			if (flags & SAFLAG_ZERO)
				bzero(chunk, size);
		} else {
			chunk = _vmem_alloc(size, PAGE_SIZE, flags);
			if (chunk == NULL)
				return(NULL);
			big = _slaballoc(sizeof(struct bigalloc), 0);
			if (big == NULL) {
				_vmem_free(chunk, size);
				return(NULL);
			}
			big->base = chunk;
			big->bytes = size;
		}
		big->active = size;

		bigp = bigalloc_lock(chunk);
		if (big->active < big->bytes) {
			atomic_add_long(&excess_alloc,
					big->bytes - big->active);
		}
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);
		handle_excess_big();

		return(chunk);
	}
	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi, flags);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return(obj);
	}

	/*
	 * Attempt to allocate out of an existing global zone.  If all zones
	 * are exhausted pull one off the free list or allocate a new one.
	 */
	slgd = &SLGlobalData;
	slgd_lock(slgd);

	if (slgd->ZoneAry[zi] == NULL) {
		z = zone_alloc(flags);
		if (z == NULL) {
			slgd_unlock(slgd);
			return(NULL);
		}

		/*
		 * How big is the base structure?
		 */
		off = sizeof(struct slzone);

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
		 *
		 * NOTE: Please see _slabmemalign(), which assumes that
		 *	 power-of-2 allocations up to and including
		 *	 MAX_SLAB_PAGEALIGN can use _slaballoc()
		 *	 and be aligned to the same.  The zone cache can be
		 *	 used for this case, bigalloc does not have to be.
		 *
		 *	 ALL power-of-2 requests that fall through to this
		 *	 code use this rule (conditionals above limit this
		 *	 to <= MAX_SLAB_PAGEALIGN).
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = roundup2(off, size);
		else
			off = roundup2(off, chunking);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
	} else {
		z = slgd->ZoneAry[zi];
	}
	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 */
	MASSERT_WTHUNLK(z->z_NFree > 0, slgd_unlock(slgd));

	/*
	 * Try to cache <count> chunks, up to CACHE_CHUNKS (32 typ)
	 * to avoid unnecessary global lock contention.
	 */
	tp = &thread_mags;
	mp = tp->mags[zi].loaded;
	count = 0;
	if (mp && tp->init >= 0) {
		count = mp->capacity - mp->rounds;
		if (count >= z->z_NFree)
			count = z->z_NFree - 1;
		if (count > CACHE_CHUNKS)
			count = CACHE_CHUNKS;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
			if (((uintptr_t)chunk & ZoneMask) == 0) {
				slgd_unlock(slgd);
				_mpanic("assertion: corrupt malloc zone");
			}
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			--z->z_NFree;
			if (count == 0)
				goto done;
			mp->objects[mp->rounds++] = chunk;
			--count;
			continue;
		}
		++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
	 */
	for (;;) {
		chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
		--z->z_NFree;
		if (++z->z_UIndex == z->z_NMax)
			z->z_UIndex = 0;
		if (z->z_UIndex == z->z_UEndIndex) {
			if (z->z_NFree != 0) {
				slgd_unlock(slgd);
				_mpanic("slaballoc: corrupted zone");
			}
		}
		if (count == 0)
			break;
		mp->objects[mp->rounds++] = chunk;
		--count;
	}

done:
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
	}
	slgd_unlock(slgd);

	if (flags & SAFLAG_ZERO)
		bzero(chunk, size);

	return(chunk);
}
/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t bigbytes;
	size_t chunking;
	slzone_t z;
	void *nptr;

	if (ptr == NULL)
		return(_slaballoc(size, 0));

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) &
				       ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;

				/*
				 * If it already fits determine if it makes
				 * sense to shrink/reallocate.  Try to optimize
				 * programs which stupidly make incremental
				 * reallocations larger or smaller by scaling
				 * the allocation.  Also deal with potential
				 * coloring.
				 */
				if (size >= (bigbytes >> 1) &&
				    size <= bigbytes) {
					if (big->active != size) {
						atomic_add_long(&excess_alloc,
								big->active -
								size);
					}
					big->active = size;
					bigalloc_unlock(ptr);
					return(ptr);
				}

				/*
				 * For large reallocations, allocate more space
				 * than we need to try to avoid excessive
				 * reallocations later on.
				 */
				chunking = size + (size >> 3);
				chunking = (chunking + PAGE_MASK) &
					   ~(size_t)PAGE_MASK;
				/*
				 * Try to allocate adjacently in case the
				 * program is idiotically realloc()ing a
				 * huge memory block just slightly bigger.
				 * (llvm's llc tends to do this a lot).
				 *
				 * (MAP_TRYFIXED forces mmap to fail if there
				 * is already something at the address).
				 */
				if (chunking > bigbytes) {
					char *addr;
					int errno_save = errno;

					addr = mmap((char *)ptr + bigbytes,
						    chunking - bigbytes,
						    PROT_READ|PROT_WRITE,
						    MAP_PRIVATE|MAP_ANON|
						    MAP_TRYFIXED,
						    -1, 0);
					errno = errno_save;
					if (addr == (char *)ptr + bigbytes) {
						atomic_add_long(&excess_alloc,
							(chunking - size) -
							(big->bytes -
							 big->active));
						big->active = size;
						big->bytes = chunking;
						bigalloc_unlock(ptr);

						return(ptr);
					}
					MASSERT_WTHUNLK(
						(void *)addr == MAP_FAILED,
						bigalloc_unlock(ptr));
				}

				/*
				 * Failed, unlink big and allocate fresh.
				 * (note that we have to leave (big) intact
				 * in case the slaballoc fails).
				 */
				*bigp = big->next;
				bigalloc_unlock(ptr);

				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink big on failure */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);

					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				_slabfree(ptr, FASTSLABREALLOC, &big);

				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}
	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size) {
			return(ptr);
		}
	}

	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}
/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
 *				supplies the bigalloc structure (which has
 *				already been unlinked from its hash chain).
 */
static void
_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
{
	slzone_t z;
	slchunk_t chunk;
	bigalloc_t big;
	bigalloc_t *bigp;
	slglobaldata_t slgd;
	size_t size;
	int zi;
	int pgno;

	/* Fast realloc path for big allocations */
	if (flags & FASTSLABREALLOC) {
		big = *rbigp;
		goto fastslabrealloc;
	}

	/*
	 * Handle NULL frees and special 0-byte allocations
	 */
	if (ptr == NULL)
		return;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				*bigp = big->next;
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				bigalloc_unlock(ptr);

				/*
				 * Try to stash the block we are freeing,
				 * potentially receiving another block in
				 * return which must be freed.
				 */
fastslabrealloc:
				if (big->bytes <= BIGCACHE_LIMIT) {
					big = bigcache_find_free(big);
					if (big == NULL)
						return;
				}
				ptr = big->base;	/* reload */
				size = big->bytes;
				_slabfree(big, 0, NULL);
				_vmem_free(ptr, size);
				return;
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize-aligned.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	size = z->z_ChunkSize;
	zi = z->z_ZoneIndex;

	if (g_malloc_flags & SAFLAG_ZERO)
		bzero(ptr, size);

	if (mtmagazine_free(zi, ptr) == 0)
		return;

	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
	chunk = ptr;

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse, and
	 * adjust z_FirstFreePg.
	 */
	slgd = &SLGlobalData;
	slgd_lock(slgd);

	chunk->c_Next = z->z_PageAry[pgno];
	z->z_PageAry[pgno] = chunk;
	if (z->z_FirstFreePg > pgno)
		z->z_FirstFreePg = pgno;

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free we get rid of it.
	 */
	if (z->z_NFree == z->z_NMax) {
		slzone_t *pz;

		pz = &slgd->ZoneAry[z->z_ZoneIndex];
		while (z != *pz)
			pz = &(*pz)->z_Next;
		*pz = z->z_Next;
		slgd_unlock(slgd);
		zone_free(z);
	} else {
		slgd_unlock(slgd);
	}
}
/*
 * Allocate and return a magazine.  Return NULL if no magazines are
 * available.
 */
static __inline void *
magazine_alloc(struct magazine *mp)
{
	void *obj = NULL;

	if (mp && MAGAZINE_NOTEMPTY(mp)) {
		obj = mp->objects[--mp->rounds];
	}
	return(obj);
}

static __inline int
magazine_free(struct magazine *mp, void *p)
{
	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
		mp->objects[mp->rounds++] = p;
		return(0);
	}
	return(-1);
}
static void *
mtmagazine_alloc(int zi, int flags)
{
	thr_mags *tp;
	struct magazine *mp, *emptymag;
	magazine_depot *d;
	void *obj;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(NULL);

	/*
	 * Primary per-thread allocation loop
	 */
	for (;;) {
		/*
		 * Make sure we have a magazine available for use.
		 */
		if (tp->newmag == NULL && (flags & SAFLAG_MAGS) == 0) {
			mp = _slaballoc(sizeof(struct magazine),
					SAFLAG_ZERO | SAFLAG_MAGS);
			if (tp->newmag)
				_slabfree(mp, 0, NULL);
			else
				tp->newmag = mp;
		}

		/*
		 * If the loaded magazine has rounds, allocate and return
		 */
		mp = tp->mags[zi].loaded;
		obj = magazine_alloc(mp);
		if (obj)
			break;

		/*
		 * The prev magazine can only be completely empty or completely
		 * full.  If it is full, swap it with the loaded magazine
		 * and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_FULL(mp)) {
			MASSERT(mp->rounds != 0);
			swap_mags(&tp->mags[zi]);	/* prev now empty */
			continue;
		}

		/*
		 * If the depot has no loaded magazines ensure that tp->loaded
		 * is not NULL and return NULL.  This will allow _slaballoc()
		 * to cache referrals to SLGlobalData in a magazine.
		 */
		d = &depots[zi];
		if (SLIST_EMPTY(&d->full)) {	/* UNLOCKED TEST IS SAFE */
			mp = tp->mags[zi].loaded;
			if (mp == NULL && tp->newmag) {
				mp = tp->newmag;
				tp->newmag = NULL;
				mp->capacity = zonecapacity(zi);
				tp->mags[zi].loaded = mp;
			}
			break;
		}

		/*
		 * Cycle: depot(loaded) -> loaded -> prev -> depot(empty)
		 *
		 * If we race and the depot has no full magazines, retry.
		 */
		depot_lock(d);
		mp = SLIST_FIRST(&d->full);
		if (mp) {
			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
			emptymag = tp->mags[zi].prev;
			if (emptymag) {
				SLIST_INSERT_HEAD(&d->empty, emptymag,
						  nextmagazine);
			}
			tp->mags[zi].prev = tp->mags[zi].loaded;
			tp->mags[zi].loaded = mp;
			MASSERT(MAGAZINE_NOTEMPTY(mp));
		}
		depot_unlock(d);
	}

	return(obj);
}
static int
mtmagazine_free(int zi, void *ptr)
{
	thr_mags *tp;
	struct magazine *mp, *loadedmag;
	magazine_depot *d;
	int rc = -1;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(-1);

	/*
	 * Primary per-thread freeing loop
	 */
	for (;;) {
		/*
		 * Make sure a new magazine is available in case we have
		 * to use it.  Staging the newmag allows us to avoid
		 * some locking/reentrancy complexity.
		 *
		 * Temporarily disable the per-thread caches for this
		 * allocation to avoid reentrancy and/or to avoid a
		 * stack overflow if the [zi] happens to be the same that
		 * would be used to allocate the new magazine.
		 *
		 * WARNING! Calling _slaballoc() can indirectly modify
		 *	    tp->newmag.
		 */
		if (tp->newmag == NULL) {
			mp = _slaballoc(sizeof(struct magazine),
					SAFLAG_ZERO | SAFLAG_MAGS);
			if (tp->newmag && mp)
				_slabfree(mp, 0, NULL);
			else
				tp->newmag = mp;
			if (tp->newmag == NULL) {
				rc = -1;
				break;
			}
		}

		/*
		 * If the loaded magazine has space, free directly to it
		 */
		rc = magazine_free(tp->mags[zi].loaded, ptr);
		if (rc == 0)
			break;

		/*
		 * The prev magazine can only be completely empty or completely
		 * full.  If it is empty, swap it with the loaded magazine
		 * and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_EMPTY(mp)) {
			MASSERT(mp->rounds == 0);
			swap_mags(&tp->mags[zi]);	/* prev now full */
			continue;
		}

		/*
		 * Try to get an empty magazine from the depot.  Cycle
		 * through depot(empty)->loaded->prev->depot(full).
		 * Retry if an empty magazine was available from the depot.
		 */
		d = &depots[zi];
		depot_lock(d);
		if ((loadedmag = tp->mags[zi].prev) != NULL)
			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->empty);
		if (mp) {
			tp->mags[zi].loaded = mp;
			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
			depot_unlock(d);
			MASSERT(MAGAZINE_NOTFULL(mp));
		} else {
			mp = tp->newmag;
			tp->newmag = NULL;
			mp->capacity = zonecapacity(zi);
			tp->mags[zi].loaded = mp;
			depot_unlock(d);
		}
	}

	return rc;
}
static void
mtmagazine_init(void)
{
	int error;

	error = _pthread_key_create(&thread_mags_key, mtmagazine_destructor);
	if (error)
		abort();
}
/*
 * This function is only used by the thread exit destructor
 */
static void
mtmagazine_drain(struct magazine *mp)
{
	void *obj;

	nmalloc_sigblockall();
	while (MAGAZINE_NOTEMPTY(mp)) {
		obj = magazine_alloc(mp);
		_slabfree(obj, 0, NULL);
	}
	nmalloc_sigunblockall();
}
/*
 * mtmagazine_destructor()
 *
 * When a thread exits, we reclaim all its resources; all its magazines are
 * drained and the structures are freed.
 *
 * WARNING!  The destructor can be called multiple times if the larger user
 *	     program has its own destructors which run after ours which
 *	     allocate or free memory.
 */
static void
mtmagazine_destructor(void *thrp)
{
	thr_mags *tp = thrp;
	struct magazine *mp;
	int i;

	/*
	 * Prevent further use of mtmagazines while we are destructing
	 * them, as well as for any destructors which are run after us
	 * prior to the thread actually being destroyed.
	 */
	tp->init = -1;

	nmalloc_sigblockall();
	for (i = 0; i < NZONES; i++) {
		mp = tp->mags[i].loaded;
		tp->mags[i].loaded = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}

		mp = tp->mags[i].prev;
		tp->mags[i].prev = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}
	}
	mp = tp->newmag;
	tp->newmag = NULL;
	if (mp)
		_slabfree(mp, 0, NULL);
	nmalloc_sigunblockall();
}
/*
 * zone_alloc()
 *
 * Attempt to allocate a zone from the zone magazine.
 */
static slzone_t
zone_alloc(int flags)
{
	slzone_t z;

	zone_magazine_lock();
	z = magazine_alloc(&zone_magazine);
	if (z == NULL) {
		zone_magazine_unlock();
		z = _vmem_alloc(ZoneSize, ZoneSize, flags);
	} else {
		z->z_Flags |= SLZF_UNOTZEROD;
		zone_magazine_unlock();
	}
	return z;
}
/*
 * zone_free()
 *
 * Release a zone, returning it to the zone magazine when possible.
 */
static void
zone_free(void *z)
{
	void *excess[M_ZONE_HYSTERESIS];
	int i;

	zone_magazine_lock();

	bzero(z, sizeof(struct slzone));

	if (opt_madvise)
		madvise(z, ZoneSize, MADV_FREE);

	i = magazine_free(&zone_magazine, z);

	/*
	 * If we failed to free, collect excess magazines; release the zone
	 * magazine lock, and then free to the system via _vmem_free. Re-enable
	 * BURST mode for the magazine.
	 */
	if (i == -1) {
		for (i = 0; i < M_ZONE_HYSTERESIS; ++i) {
			excess[i] = magazine_alloc(&zone_magazine);
			MASSERT_WTHUNLK(excess[i] != NULL,
					zone_magazine_unlock());
		}
		zone_magazine_unlock();

		for (i = 0; i < M_ZONE_HYSTERESIS; ++i)
			_vmem_free(excess[i], ZoneSize);
		_vmem_free(z, ZoneSize);
	} else {
		zone_magazine_unlock();
	}
}
/*
 * _vmem_alloc()
 *
 * Directly map memory in PAGE_SIZE'd chunks with the specified alignment.
 *
 * Alignment must be a multiple of PAGE_SIZE.
 *
 * Size must be >= alignment.
 */
static void *
_vmem_alloc(size_t size, size_t align, int flags)
{
	static char *addr_hint;
	static int reset_hint = 16;
	char *addr;
	char *save;

	if (--reset_hint <= 0) {
		addr_hint = NULL;
		reset_hint = 16;
	}

	/*
	 * Map anonymous private memory.
	 */
	save = mmap(addr_hint, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (save == MAP_FAILED)
		return(NULL);
	if (((uintptr_t)save & (align - 1)) == 0)
		return((void *)save);

	addr_hint = (char *)(((size_t)save + (align - 1)) & ~(align - 1));
	munmap(save, size);

	save = mmap(addr_hint, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (save == MAP_FAILED)
		return(NULL);
	if (((size_t)save & (align - 1)) == 0)
		return((void *)save);
	munmap(save, size);

	/*
	 * Worst case: map an oversized region and trim it to alignment.
	 */
	save = mmap(NULL, size + align, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (save == MAP_FAILED)
		return(NULL);

	addr = (char *)(((size_t)save + (align - 1)) & ~(align - 1));
	munmap(save, addr - save);
	if (addr + size != save + size + align)
		munmap(addr + size, save + align - addr);
	addr_hint = addr + size;

	return ((void *)addr);
}
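/*
 * Worked example (illustrative): a 64KB zone allocation with 64KB alignment
 * that reaches the worst case maps 128KB, rounds the start up to the next
 * 64KB boundary, then munmap()s the leading gap and whatever tail remains
 * past the 64KB result, so only the aligned 64KB stays mapped and
 * addr_hint is advanced to the byte just past it.
 */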
/*
 * _vmem_free()
 *
 * Free a chunk of memory allocated with _vmem_alloc()
 */
static void
_vmem_free(void *ptr, size_t size)
{
	munmap(ptr, size);
}
/*
 * Panic on fatal conditions
 */
static void
_mpanic(const char *ctl, ...)
{
	va_list va;

	if (malloc_panic == 0) {
		malloc_panic = 1;
		va_start(va, ctl);
		vfprintf(stderr, ctl, va);
		fprintf(stderr, "\n");
		va_end(va);
	}
	abort();
}
, aligned_alloc
);
2100 __weak_reference(__malloc
, malloc
);
2101 __weak_reference(__calloc
, calloc
);
2102 __weak_reference(__posix_memalign
, posix_memalign
);
2103 __weak_reference(__realloc
, realloc
);
2104 __weak_reference(__free
, free
);