/*
 * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
 *
 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and by
 * Venkatesh Srinivas <me@endeavour.zapto.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
 */
/*
 * This module implements a slab allocator drop-in replacement for the
 * libc malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and overhead losses are limited to a fixed
 * worst-case amount.
 *
 * The slab allocator does not have to pre-initialize the list of
 * free chunks for each zone, and the underlying VM will not be
 * touched at all beyond the zone header until an actual allocation
 * needs it.
 *
 * Slab management and locking is done on a per-zone basis.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
 * is used to locate them for free().  One- and two-page allocations use
 * the zone mechanic to avoid excessive mmap()/munmap() calls.
 *
 * API FEATURES AND SIDE EFFECTS
 *
 *	+ power-of-2 sized allocations up to a page will be power-of-2 aligned.
 *	  Above that power-of-2 sized allocations are page-aligned.  Non
 *	  power-of-2 sized allocations are aligned the same as the chunk
 *	  size for their zone.
 *	+ malloc(0) returns a special non-NULL value
 *	+ ability to allocate arbitrarily large chunks of memory
 *	+ realloc will reuse the passed pointer if possible, within the
 *	  limitations of the zone chunking.
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'.  See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
 *	USENIX Technical Conference. USENIX Association.
 *
 * Oversized allocations employ the BIGCACHE mechanic whereby large
 * allocations may be handed significantly larger buffers, allowing them
 * to avoid mmap/munmap operations even through significant realloc()s.
 * The excess space is only trimmed if too many large allocations have been
 * given this treatment.
 *
 * TUNING
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 * 'U' / ['u']	Generate / do not generate utrace entries for ktrace(1)
 *		This will generate utrace events for all malloc,
 *		realloc, and free calls.  There are tools (mtrplay) to
 *		replay an allocation pattern or to graph heap structure
 *		(mtrgraph) which can interpret these logs.
 * 'Z' / ['z']	Zero out / do not zero all allocations.
 *		Each new byte of memory allocated by malloc, realloc, or
 *		reallocf will be initialized to 0.  This is intended for
 *		debugging and will affect performance negatively.
 * 'H' / ['h']	Pass a hint to the kernel about pages unused by the
 *		allocation functions.
 */

/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
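
/*
 * Example (illustrative, not part of the original source): MALLOC_OPTIONS
 * is read once from the environment in malloc_init(), so the flags are
 * normally set before the program starts, e.g.:
 *
 *	$ env MALLOC_OPTIONS=ZU ktrace ./a.out	# zero allocations + utrace
 *	$ env MALLOC_OPTIONS=H ./a.out		# madvise() unused pages
 *
 * Unrecognized option characters are silently ignored (see the switch in
 * malloc_init() below).
 */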

#include "libc_private.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <machine/atomic.h>

#include "spinlock.h"
#include "un-namespace.h"

/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc *next;	/* hash link */
	void	*base;		/* base pointer */
	u_long	active;		/* bytes active */
	u_long	bytes;		/* bytes allocated */
} *bigalloc_t;

/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_ZONE_SIZE	(64 * 1024)
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */

#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif

/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk *c_Next;
} *slchunk_t;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
struct slglobaldata;

typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;
	int		z_Flags;
	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
} *slzone_t;

typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
	int		JunkIndex;
} *slglobaldata_t;

#define SLZF_UNOTZEROD		0x0001

#define FASTSLABREALLOC		0x02

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 *	    not be larger than 64.
 */
#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)

/*
 * BIGCACHE caches oversized allocations.  Note that a linear search is
 * performed, so do not make the cache too large.
 *
 * BIGCACHE will garbage-collect excess space when the excess exceeds the
 * specified value.  A relatively large number should be used here because
 * garbage collection is expensive.
 */
#define BIGCACHE	16
#define BIGCACHE_MASK	(BIGCACHE - 1)
#define BIGCACHE_LIMIT	(1024 * 1024)		/* size limit */
#define BIGCACHE_EXCESS	(16 * 1024 * 1024)	/* garbage collect */

#define SAFLAG_ZERO	0x0001
#define SAFLAG_PASSIVE	0x0002

/*
 * Thread control
 */

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
				_mpanic("assertion: %s in %s",	\
					#exp, __func__);	\
			    } while (0)

/*
 * Magazines
 */

#define M_MAX_ROUNDS	64
#define M_ZONE_ROUNDS	64
#define M_LOW_ROUNDS	32
#define M_INIT_ROUNDS	8
#define M_BURST_FACTOR	8
#define M_BURST_NSCALE	2

#define M_BURST		0x0001
#define M_BURST_EARLY	0x0002

struct magazine {
	SLIST_ENTRY(magazine) nextmagazine;

	int		flags;
	int		capacity;	/* Max rounds in this magazine */
	int		rounds;		/* Current number of free rounds */
	int		burst_factor;	/* Number of blocks to prefill with */
	int		low_factor;	/* Free till low_factor from full mag */
	void		*objects[M_MAX_ROUNDS];
};

SLIST_HEAD(magazinelist, magazine);

static spinlock_t zone_mag_lock;
static spinlock_t depot_spinlock;
static struct magazine zone_magazine = {
	.flags = M_BURST | M_BURST_EARLY,
	.capacity = M_ZONE_ROUNDS,
	.rounds = 0,
	.burst_factor = M_BURST_FACTOR,
	.low_factor = M_LOW_ROUNDS
};

#define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
#define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
#define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
#define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)

/*
 * Each thread will have a pair of magazines per size-class (NZONES)
 * The loaded magazine will support immediate allocations, the previous
 * magazine will either be full or empty and can be swapped at need
 */
typedef struct magazine_pair {
	struct magazine	*loaded;
	struct magazine	*prev;
} magazine_pair;

/* A depot is a collection of magazines for a single zone. */
typedef struct magazine_depot {
	struct magazinelist full;
	struct magazinelist empty;
	spinlock_t	lock;
} magazine_depot;

typedef struct thr_mags {
	magazine_pair	mags[NZONES];
	struct magazine	*newmag;
	int		init;
} thr_mags;
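
/*
 * Rough sketch of how the caching layers fit together (drawn from the
 * comments in mtmagazine_alloc()/mtmagazine_free(); illustrative only,
 * not part of the original source):
 *
 *	thread:	mags[zi].loaded  <->  mags[zi].prev
 *			|  refill / drain
 *	depot[zi]:	full list  <->  empty list
 *			|  magazines exhausted
 *	global:	SLGlobalData zones, then _vmem_alloc()/mmap()
 *
 * Allocation tries the loaded magazine, then swaps in a full prev
 * magazine, then pulls a full magazine from the depot; freeing runs the
 * same cycle in reverse toward the depot's empty list.
 */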

/*
 * With this attribute set, do not require a function call for accessing
 * this variable when the code is compiled -fPIC.
 *
 * Must be empty for libc_rtld (similar to __thread).
 */
#ifdef __LIBC_RTLD
#define TLS_ATTRIBUTE
#else
#define TLS_ATTRIBUTE	__attribute__ ((tls_model ("initial-exec")))
#endif

static __thread thr_mags thread_mags TLS_ATTRIBUTE;
static pthread_key_t thread_mags_key;
static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
static magazine_depot depots[NZONES];

/*
 * Fixed globals (not per-cpu)
 */
static const int ZoneSize = ZALLOC_ZONE_SIZE;
static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;

static int opt_madvise = 0;
static int opt_utrace = 0;
static int g_malloc_flags = 0;
static struct slglobaldata SLGlobalData;
static bigalloc_t bigalloc_array[BIGHSIZE];
static spinlock_t bigspin_array[BIGXSIZE];
static volatile void *bigcache_array[BIGCACHE];		/* atomic swap */
static volatile size_t bigcache_size_array[BIGCACHE];	/* SMP races ok */
static volatile int bigcache_index;			/* SMP races ok */
static int malloc_panic;
static size_t excess_alloc;				/* excess big allocs */

static void *_slaballoc(size_t size, int flags);
static void *_slabrealloc(void *ptr, size_t size);
static void _slabfree(void *ptr, int, bigalloc_t *);
static void *_vmem_alloc(size_t bytes, size_t align, int flags);
static void _vmem_free(void *ptr, size_t bytes);
static void *magazine_alloc(struct magazine *, int *);
static int magazine_free(struct magazine *, void *);
static void *mtmagazine_alloc(int zi);
static int mtmagazine_free(int zi, void *);
static void mtmagazine_init(void);
static void mtmagazine_destructor(void *);
static slzone_t zone_alloc(int flags);
static void zone_free(void *z);
static void _mpanic(const char *ctl, ...) __printflike(1, 2);
static void malloc_init(void) __constructor(101);

struct nmalloc_utrace {
	void *p;
	size_t s;
	void *r;
};

#define UTRACE(a, b, c)						\
	if (opt_utrace) {					\
		struct nmalloc_utrace ut = {			\
			.p = (a),				\
			.s = (b),				\
			.r = (c)				\
		};						\
		utrace(&ut, sizeof(ut));			\
	}

static void
malloc_init(void)
{
	const char *p = NULL;

	if (issetugid() == 0)
		p = getenv("MALLOC_OPTIONS");

	for (; p != NULL && *p != '\0'; p++) {
		switch(*p) {
		case 'u':	opt_utrace = 0; break;
		case 'U':	opt_utrace = 1; break;
		case 'h':	opt_madvise = 0; break;
		case 'H':	opt_madvise = 1; break;
		case 'z':	g_malloc_flags = 0; break;
		case 'Z':	g_malloc_flags = SAFLAG_ZERO; break;
		default:
			break;
		}
	}

	UTRACE((void *) -1, 0, NULL);
}

/*
 * We have to install a handler for nmalloc thread teardowns when
 * the thread is created.  We cannot delay this because destructors in
 * sophisticated userland programs can call malloc() for the first time
 * during their thread exit.
 *
 * This routine is called directly from pthreads.
 */
void
_nmalloc_thr_init(void)
{
	static int init_once;
	thr_mags *tp;

	/*
	 * Disallow mtmagazine operations until the mtmagazine is
	 * initialized.
	 */
	tp = &thread_mags;
	tp->init = -1;

	if (init_once == 0) {
		init_once = 1;
		pthread_once(&thread_mags_once, mtmagazine_init);
	}
	pthread_setspecific(thread_mags_key, tp);
	tp->init = 1;
}

void
_nmalloc_thr_prepfork(void)
{
	if (__isthreaded) {
		_SPINLOCK(&zone_mag_lock);
		_SPINLOCK(&depot_spinlock);
	}
}

void
_nmalloc_thr_parentfork(void)
{
	if (__isthreaded) {
		_SPINUNLOCK(&depot_spinlock);
		_SPINUNLOCK(&zone_mag_lock);
	}
}

void
_nmalloc_thr_childfork(void)
{
	if (__isthreaded) {
		_SPINUNLOCK(&depot_spinlock);
		_SPINUNLOCK(&zone_mag_lock);
	}
}

/*
 * Thread locks.
 */
static __inline void
slgd_lock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINLOCK(&slgd->Spinlock);
}

static __inline void
slgd_unlock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINUNLOCK(&slgd->Spinlock);
}

static __inline void
depot_lock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINLOCK(&depot_spinlock);
#if 0
	if (__isthreaded)
		_SPINLOCK(&dp->lock);
#endif
}

static __inline void
depot_unlock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINUNLOCK(&depot_spinlock);
#if 0
	if (__isthreaded)
		_SPINUNLOCK(&dp->lock);
#endif
}

static __inline void
zone_magazine_lock(void)
{
	if (__isthreaded)
		_SPINLOCK(&zone_mag_lock);
}

static __inline void
zone_magazine_unlock(void)
{
	if (__isthreaded)
		_SPINUNLOCK(&zone_mag_lock);
}

static __inline void
swap_mags(magazine_pair *mp)
{
	struct magazine *tmp;
	tmp = mp->loaded;
	mp->loaded = mp->prev;
	mp->prev = tmp;
}

/*
 * bigalloc hashing and locking support.
 *
 * Return an unmasked hash code for the passed pointer.
 */
static __inline int
_bigalloc_hash(void *ptr)
{
	int hv;

	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
	     ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));

	return(hv);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 */
static __inline bigalloc_t *
bigalloc_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (__isthreaded)
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 *
 * BUT, if the hash chain is empty, just return NULL and do not bother
 * to lock anything.
 */
static __inline bigalloc_t *
bigalloc_check_and_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (*bigp == NULL)
		return(NULL);
	if (__isthreaded) {
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	}
	return(bigp);
}

static __inline void
bigalloc_unlock(void *ptr)
{
	int hv;

	if (__isthreaded) {
		hv = _bigalloc_hash(ptr);
		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
	}
}

/*
 * Find a bigcache entry that might work for the allocation.  SMP races are
 * ok here except for the swap (that is, it is ok if bigcache_size_array[i]
 * is wrong or if a NULL or too-small big is returned).
 *
 * Generally speaking it is ok to find a large entry even if the bytes
 * requested are relatively small (but still oversized), because we really
 * don't know *what* the application is going to do with the buffer.
 */
static __inline
bigalloc_t
bigcache_find_alloc(size_t bytes)
{
	bigalloc_t big = NULL;
	size_t test;
	int i;

	for (i = 0; i < BIGCACHE; ++i) {
		test = bigcache_size_array[i];
		if (bytes <= test) {
			bigcache_size_array[i] = 0;
			big = atomic_swap_ptr(&bigcache_array[i], NULL);
			break;
		}
	}
	return big;
}

/*
 * Free a bigcache entry, possibly returning one that the caller really must
 * free.  This is used to cache recent oversized memory blocks.  Only
 * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
 * to collect the biggest ones we can that are under the limit.
 */
static __inline
bigalloc_t
bigcache_find_free(bigalloc_t big)
{
	int i;
	int j;
	int b;

	b = ++bigcache_index;
	for (i = 0; i < BIGCACHE; ++i) {
		j = (b + i) & BIGCACHE_MASK;
		if (bigcache_size_array[j] < big->bytes) {
			bigcache_size_array[j] = big->bytes;
			big = atomic_swap_ptr(&bigcache_array[j], big);
			break;
		}
	}
	return big;
}

static __inline
void
handle_excess_big(void)
{
	int i;
	bigalloc_t big;
	bigalloc_t *bigp;

	if (excess_alloc <= BIGCACHE_EXCESS)
		return;

	for (i = 0; i < BIGHSIZE; ++i) {
		bigp = &bigalloc_array[i];
		if (*bigp == NULL)
			continue;
		if (__isthreaded)
			_SPINLOCK(&bigspin_array[i & BIGXMASK]);
		for (big = *bigp; big; big = big->next) {
			if (big->active < big->bytes) {
				MASSERT((big->active & PAGE_MASK) == 0);
				MASSERT((big->bytes & PAGE_MASK) == 0);
				munmap((char *)big->base + big->active,
				       big->bytes - big->active);
				atomic_add_long(&excess_alloc,
						big->active - big->bytes);
				big->bytes = big->active;
			}
		}
		if (__isthreaded)
			_SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */

	/*
	 * This used to be 8-byte chunks and 16 zones for n < 128.
	 * However some instructions may require 16-byte alignment
	 * (aka SIMD) and programs might not request an aligned size
	 * (aka GCC-7), so change this as follows:
	 *
	 * 0-15 bytes	8-byte alignment in two zones	(0-1)
	 * 16-127 bytes	16-byte alignment in eight zones (3-10)
	 * zone index 2 and 11-15 are currently unused.
	 */
	if (n < 16) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 2 zones */
		/* zones 0,1, zone 2 is unused */
	}
	if (n < 128) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 2);		/* 16 byte chunks, 8 zones */
		/* zones 3-10, zones 11-15 unused */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %zu", n);
	return(0);
}
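
/*
 * Worked example (illustrative, not part of the original source): a
 * request for 100 bytes falls into the n < 128 case above, so it is
 * rounded up to 112 bytes with a chunking of 16 and lands in zone
 * 112/16 + 2 = 9; a request for 1000 bytes rounds up to 1024 with a
 * chunking of 64 and lands in zone 1024/64 + 23 = 39.
 */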

/*
 * malloc() - call internal slab allocator
 */
void *
__malloc(size_t size)
{
	void *ptr;

	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, size, ptr);
	return(ptr);
}

#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

/*
 * calloc() - call internal slab allocator
 */
void *
__calloc(size_t number, size_t size)
{
	void *ptr;

	if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    number > 0 && SIZE_MAX / number < size) {
		errno = ENOMEM;
		return(NULL);
	}

	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, number * size, ptr);
	return(ptr);
}
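
/*
 * Overflow-check note (illustrative, not part of the original source):
 * MUL_NO_OVERFLOW is 2^(half the bits of size_t), e.g. 2^32 on a 64-bit
 * system.  If both operands are below that bound their product cannot
 * overflow size_t, so the relatively expensive SIZE_MAX / number division
 * above is only performed when at least one operand is large.
 */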

/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
__realloc(void *ptr, size_t size)
{
	void *ret;
	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	else
		UTRACE(ptr, size, ret);
	return(ret);
}

/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 *
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
 * zone matching the requirements, and _vmem_alloc() otherwise.
 */
int
__posix_memalign(void **memptr, size_t alignment, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t chunking;
	int zi __unused;

	/*
	 * OpenGroup spec issue 6 checks
	 */
	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
		*memptr = NULL;
		return(EINVAL);
	}
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	/*
	 * Our zone mechanism guarantees same-sized alignment for any
	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
	 * we can just call _slaballoc() and be done.  We round size up
	 * to the nearest alignment boundary to improve our odds of
	 * it becoming a power-of-2 if it wasn't before.
	 */
	if (size <= alignment)
		size = alignment;
	else
		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
		*memptr = _slaballoc(size, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * Otherwise locate a zone with a chunking that matches
	 * the requested alignment, within reason.  Consider two cases:
	 *
	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
	 *     we find will be the best fit because the chunking will be
	 *     greater or equal to the alignment.
	 *
	 * (2) A 513-byte allocation on a 256-byte alignment.  In this case
	 *     the first zoneindex we find will be for 576 byte allocations
	 *     with a chunking of 64, which is not sufficient.  To fix this
	 *     we simply find the nearest power-of-2 >= size and use the
	 *     same side-effect of _slaballoc() which guarantees
	 *     same-alignment on a power-of-2 allocation.
	 */
	if (size < PAGE_SIZE) {
		zi = zoneindex(&size, &chunking);
		if (chunking >= alignment) {
			*memptr = _slaballoc(size, 0);
			return(*memptr ? 0 : ENOMEM);
		}
		if (size >= 1024)
			alignment = 1024;
		if (size >= 16384)
			alignment = 16384;
		while (alignment < size)
			alignment <<= 1;
		*memptr = _slaballoc(alignment, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * If the slab allocator cannot handle it use vmem_alloc().
	 *
	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->active = size;
	big->bytes = size;		/* no excess */
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}
926 void
927 __free(void *ptr)
929 UTRACE(ptr, 0, 0);
930 _slabfree(ptr, 0, NULL);

/*
 * _slaballoc()	(SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	size_t chunking;
	int zi;
	int off;
	void *obj;

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0)
		size = 1;

	/* Capture global flags */
	flags |= g_malloc_flags;

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on a SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		bigalloc_t big;
		bigalloc_t *bigp;

		/*
		 * Page-align and cache-color in case of virtually indexed
		 * physically tagged L1 caches (aka SandyBridge).  No sweat
		 * otherwise, so just do it.
		 *
		 * (don't count as excess).
		 */
		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
		if ((size & (PAGE_SIZE * 2 - 1)) == 0)
			size += PAGE_SIZE;

		/*
		 * Try to reuse a cached big block to avoid mmap'ing.  If it
		 * turns out not to fit our requirements we throw it away
		 * and allocate normally.
		 */
		big = NULL;
		if (size <= BIGCACHE_LIMIT) {
			big = bigcache_find_alloc(size);
			if (big && big->bytes < size) {
				_slabfree(big->base, FASTSLABREALLOC, &big);
				big = NULL;
			}
		}
		if (big) {
			chunk = big->base;
			if (flags & SAFLAG_ZERO)
				bzero(chunk, size);
		} else {
			chunk = _vmem_alloc(size, PAGE_SIZE, flags);
			if (chunk == NULL)
				return(NULL);

			big = _slaballoc(sizeof(struct bigalloc), 0);
			if (big == NULL) {
				_vmem_free(chunk, size);
				return(NULL);
			}
			big->base = chunk;
			big->bytes = size;
		}
		big->active = size;

		bigp = bigalloc_lock(chunk);
		if (big->active < big->bytes) {
			atomic_add_long(&excess_alloc,
					big->bytes - big->active);
		}
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);
		handle_excess_big();

		return(chunk);
	}

	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return (obj);
	}

	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Attempt to allocate out of an existing zone.  If all zones are
	 * exhausted pull one off the free list or allocate a new one.
	 */
	if ((z = slgd->ZoneAry[zi]) == NULL) {
		z = zone_alloc(flags);
		if (z == NULL)
			goto fail;

		/*
		 * How big is the base structure?
		 */
		off = sizeof(struct slzone);

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = roundup2(off, size);
		else
			off = roundup2(off, chunking);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
					& (ZALLOC_MAX_ZONE_SIZE - 1);
	}

	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 *
	 * Remove us from the ZoneAry[] when we become empty
	 */
	MASSERT(z->z_NFree > 0);

	if (--z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
			MASSERT((uintptr_t)chunk & ZoneMask);
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			goto done;
		}
		++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
	 */
	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);

	if (++z->z_UIndex == z->z_NMax)
		z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex) {
		if (z->z_NFree != 0)
			_mpanic("slaballoc: corrupted zone");
	}

	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}

done:
	slgd_unlock(slgd);
	if (flags & SAFLAG_ZERO)
		bzero(chunk, size);
	return(chunk);
fail:
	slgd_unlock(slgd);
	return(NULL);
}

/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	void *nptr;
	slzone_t z;
	size_t chunking;

	if (ptr == NULL) {
		return(_slaballoc(size, 0));
	}

	if (size == 0)
		size = 1;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		bigalloc_t big;
		size_t bigbytes;

		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;

				/*
				 * If it already fits determine if it makes
				 * sense to shrink/reallocate.  Try to optimize
				 * programs which stupidly make incremental
				 * reallocations larger or smaller by scaling
				 * the allocation.  Also deal with potential
				 * coloring.
				 */
				if (size >= (bigbytes >> 1) &&
				    size <= bigbytes) {
					if (big->active != size) {
						atomic_add_long(&excess_alloc,
								big->active -
								size);
					}
					big->active = size;
					bigalloc_unlock(ptr);
					return(ptr);
				}

				/*
				 * For large reallocations, allocate more space
				 * than we need to try to avoid excessive
				 * reallocations later on.
				 */
				chunking = size + (size >> 3);
				chunking = (chunking + PAGE_MASK) &
					   ~(size_t)PAGE_MASK;

				/*
				 * Try to allocate adjacently in case the
				 * program is idiotically realloc()ing a
				 * huge memory block just slightly bigger.
				 * (llvm's llc tends to do this a lot).
				 *
				 * (MAP_TRYFIXED forces mmap to fail if there
				 * is already something at the address).
				 */
				if (chunking > bigbytes) {
					char *addr;
					int errno_save = errno;

					addr = mmap((char *)ptr + bigbytes,
						    chunking - bigbytes,
						    PROT_READ|PROT_WRITE,
						    MAP_PRIVATE|MAP_ANON|
						    MAP_TRYFIXED,
						    -1, 0);
					errno = errno_save;
					if (addr == (char *)ptr + bigbytes) {
						atomic_add_long(&excess_alloc,
								big->active -
								big->bytes +
								chunking -
								size);
						big->bytes = chunking;
						big->active = size;
						bigalloc_unlock(ptr);

						return(ptr);
					}
					MASSERT((void *)addr == MAP_FAILED);
				}

				/*
				 * Failed, unlink big and allocate fresh.
				 * (note that we have to leave (big) intact
				 * in case the slaballoc fails).
				 */
				*bigp = big->next;
				bigalloc_unlock(ptr);
				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink block */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);
					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				_slabfree(ptr, FASTSLABREALLOC, &big);

				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size) {
			return(ptr);
		}
	}

	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * flags:
 *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
 *				unlinked.
 *
 * MPSAFE
 */
static void
_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
{
	slzone_t z;
	slchunk_t chunk;
	bigalloc_t big;
	bigalloc_t *bigp;
	slglobaldata_t slgd;
	size_t size;
	int zi;
	int pgno;

	/* Fast realloc path for big allocations */
	if (flags & FASTSLABREALLOC) {
		big = *rbigp;
		goto fastslabrealloc;
	}

	/*
	 * Handle NULL frees and special 0-byte allocations
	 */
	if (ptr == NULL)
		return;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				*bigp = big->next;
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				bigalloc_unlock(ptr);

				/*
				 * Try to stash the block we are freeing,
				 * potentially receiving another block in
				 * return which must be freed.
				 */
fastslabrealloc:
				if (big->bytes <= BIGCACHE_LIMIT) {
					big = bigcache_find_free(big);
					if (big == NULL)
						return;
				}
				ptr = big->base;	/* reload */
				size = big->bytes;
				_slabfree(big, 0, NULL);
				_vmem_free(ptr, size);
				return;
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	size = z->z_ChunkSize;
	zi = z->z_ZoneIndex;

	if (g_malloc_flags & SAFLAG_ZERO)
		bzero(ptr, size);

	if (mtmagazine_free(zi, ptr) == 0)
		return;

	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
	chunk = ptr;
	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
	 * z_FirstFreePg.
	 */
	chunk->c_Next = z->z_PageAry[pgno];
	z->z_PageAry[pgno] = chunk;
	if (z->z_FirstFreePg > pgno)
		z->z_FirstFreePg = pgno;

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free then release it.
	 */
	if (z->z_NFree == z->z_NMax) {
		slzone_t *pz;

		pz = &slgd->ZoneAry[z->z_ZoneIndex];
		while (z != *pz)
			pz = &(*pz)->z_Next;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = NULL;
		zone_free(z);
		/* slgd lock released */
		return;
	}
	slgd_unlock(slgd);
}

/*
 * Allocate and return a magazine.  NULL is returned and *burst is adjusted
 * if the magazine is empty.
 */
static __inline void *
magazine_alloc(struct magazine *mp, int *burst)
{
	void *obj;

	if (mp == NULL)
		return(NULL);
	if (MAGAZINE_NOTEMPTY(mp)) {
		obj = mp->objects[--mp->rounds];
		return(obj);
	}

	/*
	 * Return burst factor to caller along with NULL
	 */
	if ((mp->flags & M_BURST) && (burst != NULL)) {
		*burst = mp->burst_factor;
	}
	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
	    (burst != NULL)) {
		mp->burst_factor -= M_BURST_NSCALE;
		if (mp->burst_factor <= 1) {
			mp->burst_factor = 1;
			mp->flags &= ~(M_BURST);
			mp->flags &= ~(M_BURST_EARLY);
		}
	}

	return (NULL);
}

static __inline int
magazine_free(struct magazine *mp, void *p)
{
	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
		mp->objects[mp->rounds++] = p;
		return 0;
	}

	return -1;
}

static void *
mtmagazine_alloc(int zi)
{
	thr_mags *tp;
	struct magazine *mp, *emptymag;
	magazine_depot *d;
	void *obj;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(NULL);

	/*
	 * Primary per-thread allocation loop
	 */
	for (;;) {
		/*
		 * If the loaded magazine has rounds, allocate and return
		 */
		mp = tp->mags[zi].loaded;
		obj = magazine_alloc(mp, NULL);
		if (obj)
			break;

		/*
		 * If the prev magazine is full, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_FULL(mp)) {
			MASSERT(mp->rounds != 0);
			swap_mags(&tp->mags[zi]);	/* prev now empty */
			continue;
		}

		/*
		 * Try to get a full magazine from the depot.  Cycle
		 * through depot(full)->loaded->prev->depot(empty).
		 * Retry if a full magazine was available from the depot.
		 *
		 * Return NULL (caller will fall through) if no magazines
		 * can be found anywhere.
		 */
		d = &depots[zi];
		depot_lock(d);
		emptymag = tp->mags[zi].prev;
		if (emptymag)
			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
		tp->mags[zi].loaded = mp;
		if (mp) {
			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
			MASSERT(MAGAZINE_NOTEMPTY(mp));
			depot_unlock(d);
			continue;
		}
		depot_unlock(d);
		break;
	}

	return (obj);
}

static int
mtmagazine_free(int zi, void *ptr)
{
	thr_mags *tp;
	struct magazine *mp, *loadedmag;
	magazine_depot *d;
	int rc = -1;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(-1);

	/*
	 * Primary per-thread freeing loop
	 */
	for (;;) {
		/*
		 * Make sure a new magazine is available in case we have
		 * to use it.  Staging the newmag allows us to avoid
		 * some locking/reentrancy complexity.
		 *
		 * Temporarily disable the per-thread caches for this
		 * allocation to avoid reentrancy and/or to avoid a
		 * stack overflow if the [zi] happens to be the same that
		 * would be used to allocate the new magazine.
		 */
		if (tp->newmag == NULL) {
			tp->init = -1;
			tp->newmag = _slaballoc(sizeof(struct magazine),
						SAFLAG_ZERO);
			tp->init = 1;
			if (tp->newmag == NULL) {
				rc = -1;
				break;
			}
		}

		/*
		 * If the loaded magazine has space, free directly to it
		 */
		rc = magazine_free(tp->mags[zi].loaded, ptr);
		if (rc == 0)
			break;

		/*
		 * If the prev magazine is empty, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_EMPTY(mp)) {
			MASSERT(mp->rounds == 0);
			swap_mags(&tp->mags[zi]);	/* prev now full */
			continue;
		}

		/*
		 * Try to get an empty magazine from the depot.  Cycle
		 * through depot(empty)->loaded->prev->depot(full).
		 * Retry if an empty magazine was available from the depot.
		 */
		d = &depots[zi];
		depot_lock(d);

		if ((loadedmag = tp->mags[zi].prev) != NULL)
			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->empty);
		if (mp) {
			tp->mags[zi].loaded = mp;
			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
			MASSERT(MAGAZINE_NOTFULL(mp));
		} else {
			mp = tp->newmag;
			tp->newmag = NULL;
			mp->capacity = M_MAX_ROUNDS;
			mp->rounds = 0;
			mp->flags = 0;
			tp->mags[zi].loaded = mp;
		}
		depot_unlock(d);
	}

	return rc;
}

static void
mtmagazine_init(void)
{
	int error;

	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
	if (error)
		abort();
}

/*
 * This function is only used by the thread exit destructor
 */
static void
mtmagazine_drain(struct magazine *mp)
{
	void *obj;

	while (MAGAZINE_NOTEMPTY(mp)) {
		obj = magazine_alloc(mp, NULL);
		_slabfree(obj, 0, NULL);
	}
}

/*
 * mtmagazine_destructor()
 *
 * When a thread exits, we reclaim all its resources; all its magazines are
 * drained and the structures are freed.
 *
 * WARNING!  The destructor can be called multiple times if the larger user
 *	     program has its own destructors which run after ours and which
 *	     allocate or free memory.
 */
static void
mtmagazine_destructor(void *thrp)
{
	thr_mags *tp = thrp;
	struct magazine *mp;
	int i;

	/*
	 * Prevent further use of mtmagazines while we are destructing
	 * them, as well as for any destructors which are run after us
	 * prior to the thread actually being destroyed.
	 */
	tp->init = -1;

	for (i = 0; i < NZONES; i++) {
		mp = tp->mags[i].loaded;
		tp->mags[i].loaded = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}

		mp = tp->mags[i].prev;
		tp->mags[i].prev = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}
	}

	if (tp->newmag) {
		mp = tp->newmag;
		tp->newmag = NULL;
		_slabfree(mp, 0, NULL);
	}
}

/*
 * zone_alloc()
 *
 * Attempt to allocate a zone from the zone magazine; the zone magazine has
 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
 */
static slzone_t
zone_alloc(int flags)
{
	slglobaldata_t slgd = &SLGlobalData;
	int burst = 1;
	int i, j;
	slzone_t z;

	zone_magazine_lock();
	slgd_unlock(slgd);

	z = magazine_alloc(&zone_magazine, &burst);
	if (z == NULL && burst == 1) {
		zone_magazine_unlock();
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
	} else if (z == NULL) {
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
		if (z) {
			for (i = 1; i < burst; i++) {
				j = magazine_free(&zone_magazine,
						  (char *) z + (ZoneSize * i));
				MASSERT(j == 0);
			}
		}
		zone_magazine_unlock();
	} else {
		z->z_Flags |= SLZF_UNOTZEROD;
		zone_magazine_unlock();
	}
	slgd_lock(slgd);
	return z;
}

/*
 * zone_free()
 *
 * Release a zone and unlock the slgd lock.
 */
static void
zone_free(void *z)
{
	slglobaldata_t slgd = &SLGlobalData;
	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
	int i, j;

	zone_magazine_lock();
	slgd_unlock(slgd);

	bzero(z, sizeof(struct slzone));

	if (opt_madvise)
		madvise(z, ZoneSize, MADV_FREE);

	i = magazine_free(&zone_magazine, z);

	/*
	 * If we failed to free, collect excess magazines; release the zone
	 * magazine lock, and then free to the system via _vmem_free.  Re-enable
	 * BURST mode for the magazine.
	 */
	if (i == -1) {
		j = zone_magazine.rounds - zone_magazine.low_factor;
		for (i = 0; i < j; i++) {
			excess[i] = magazine_alloc(&zone_magazine, NULL);
			MASSERT(excess[i] != NULL);
		}

		zone_magazine_unlock();

		for (i = 0; i < j; i++)
			_vmem_free(excess[i], ZoneSize);

		_vmem_free(z, ZoneSize);
	} else {
		zone_magazine_unlock();
	}
}

/*
 * _vmem_alloc()
 *
 *	Directly map memory in PAGE_SIZE'd chunks with the specified
 *	alignment.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	Size must be >= alignment.
 */
static void *
_vmem_alloc(size_t size, size_t align, int flags)
{
	char *addr;
	char *save;
	size_t excess;

	/*
	 * Map anonymous private memory.
	 */
	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return(NULL);

	/*
	 * Check alignment.  The misaligned offset is also the excess
	 * amount.  If misaligned unmap the excess so we have a chance of
	 * mapping at the next alignment point and recursively try again.
	 *
	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
	 *   xxxxxxxxx				final excess calculation
	 *   ^ returned address
	 */
	excess = (uintptr_t)addr & (align - 1);

	if (excess) {
		excess = align - excess;
		save = addr;

		munmap(save + excess, size - excess);
		addr = _vmem_alloc(size, align, flags);
		munmap(save, excess);
	}
	return((void *)addr);
}

/*
 * _vmem_free()
 *
 *	Free a chunk of memory allocated with _vmem_alloc()
 */
static void
_vmem_free(void *ptr, size_t size)
{
	munmap(ptr, size);
}

/*
 * Panic on fatal conditions
 */
static void
_mpanic(const char *ctl, ...)
{
	va_list va;

	if (malloc_panic == 0) {
		malloc_panic = 1;
		va_start(va, ctl);
		vfprintf(stderr, ctl, va);
		fprintf(stderr, "\n");
		fflush(stderr);
		va_end(va);
	}
	abort();
}

__weak_reference(__malloc, malloc);
__weak_reference(__calloc, calloc);
__weak_reference(__posix_memalign, posix_memalign);
__weak_reference(__realloc, realloc);
__weak_reference(__free, free);