libc - Fix a serious memory leak in the nmalloc code
[dragonfly.git] / lib / libc / stdlib / nmalloc.c
1 /*
2 * NMALLOC.C - New Malloc (ported from kernel slab allocator)
4 * Copyright (c) 2003,2004,2009,2010-2019 The DragonFly Project,
5 * All rights reserved.
7 * This code is derived from software contributed to The DragonFly Project
8 * by Matthew Dillon <dillon@backplane.com> and by
9 * Venkatesh Srinivas <me@endeavour.zapto.org>.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 * 3. Neither the name of The DragonFly Project nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific, prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
38 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
41 * This module implements a slab allocator drop-in replacement for the
42 * libc malloc().
44 * A slab allocator reserves a ZONE for each chunk size, then lays the
45 * chunks out in an array within the zone. Allocation and deallocation
46 * is nearly instantaneous, and overhead losses are limited to a fixed
47 * worst-case amount.
49 * The slab allocator does not have to pre-initialize the list of
50 * free chunks for each zone, and the underlying VM will not be
51 * touched at all beyond the zone header until an actual allocation
52 * needs it.
54 * Slab management and locking is done on a per-zone basis.
56 * Alloc Size Chunking Number of zones
57 * 0-127 8 16
58 * 128-255 16 8
59 * 256-511 32 8
60 * 512-1023 64 8
61 * 1024-2047 128 8
62 * 2048-4095 256 8
63 * 4096-8191 512 8
64 * 8192-16383 1024 8
65 * 16384-32767 2048 8
67 * Allocations >= ZoneLimit go directly to mmap and a hash table
68 * is used to locate them on free(). One- and two-page allocations use the
69 * zone mechanic to avoid excessive mmap()/munmap() calls.
71 * API FEATURES AND SIDE EFFECTS
73 * + power-of-2 sized allocations up to a page will be power-of-2 aligned.
74 * Above that, power-of-2 sized allocations are page-aligned. Non
75 * power-of-2 sized allocations are aligned the same as the chunk
76 * size for their zone.
77 * + malloc(0) returns a special non-NULL value
78 * + ability to allocate arbitrarily large chunks of memory
79 * + realloc will reuse the passed pointer if possible, within the
80 * limitations of the zone chunking.
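 *	e.g. malloc(256) returns 256-byte-aligned storage, malloc(192) is only
 *	guaranteed the 16-byte alignment of its zone's chunking, and malloc(0)
 *	returns a non-NULL pointer that may later be passed to free().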
82 * Multithreaded enhancements for small allocations introduced August 2010.
83 * These are in the spirit of 'libumem'. See:
84 * Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
85 * slab allocator to many CPUs and arbitrary resources". In Proc. 2001
86 * USENIX Technical Conference. USENIX Association.
88 * Oversized allocations employ the BIGCACHE mechanic whereby large
89 * allocations may be handed significantly larger buffers, allowing them
90 * to avoid mmap/munmap operations even through significant realloc()s.
91 * The excess space is only trimmed if too many large allocations have been
92 * given this treatment.
94 * TUNING
96 * The value of the environment variable MALLOC_OPTIONS is a character string
97 * containing various flags to tune nmalloc.
99 * 'U' / ['u'] Generate / do not generate utrace entries for ktrace(1)
100 * This will generate utrace events for all malloc,
101 * realloc, and free calls. There are tools (mtrplay) to
102 * replay an allocation pattern or to graph heap structure
103 * (mtrgraph) which can interpret these logs.
104 * 'Z' / ['z'] Zero out / do not zero all allocations.
105 * Each new byte of memory allocated by malloc, realloc, or
106 * reallocf will be initialized to 0. This is intended for
107 * debugging and will affect performance negatively.
108 * 'H' / ['h'] Pass a hint to the kernel about pages unused by the
109 * allocation functions.
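 *	e.g. running a program with MALLOC_OPTIONS=ZU in its environment
 *	enables both allocation zeroing and utrace logging.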
112 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
114 #include "namespace.h"
115 #include <sys/param.h>
116 #include <sys/types.h>
117 #include <sys/mman.h>
118 #include <sys/queue.h>
119 #include <sys/ktrace.h>
120 #include <stdio.h>
121 #include <stdint.h>
122 #include <stdlib.h>
123 #include <stdarg.h>
124 #include <stddef.h>
125 #include <unistd.h>
126 #include <string.h>
127 #include <fcntl.h>
128 #include <errno.h>
129 #include <pthread.h>
130 #include <machine/atomic.h>
131 #include "un-namespace.h"
133 #include "libc_private.h"
134 #include "spinlock.h"
136 void __free(void *);
137 void *__malloc(size_t);
138 void *__calloc(size_t, size_t);
139 void *__realloc(void *, size_t);
140 void *__aligned_alloc(size_t, size_t);
141 int __posix_memalign(void **, size_t, size_t);
144 * Linked list of large allocations
146 typedef struct bigalloc {
147 struct bigalloc *next; /* hash link */
148 void *base; /* base pointer */
149 u_long active; /* bytes active */
150 u_long bytes; /* bytes allocated */
151 } *bigalloc_t;
154 * Note that any allocations which are exact multiples of PAGE_SIZE, or
155 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
157 #define MAX_SLAB_PAGEALIGN (2 * PAGE_SIZE) /* max slab for PAGE_SIZE*n */
158 #define ZALLOC_ZONE_LIMIT (16 * 1024) /* max slab-managed alloc */
159 #define ZALLOC_ZONE_SIZE (64 * 1024) /* zone size */
160 #define ZALLOC_SLAB_MAGIC 0x736c6162 /* magic sanity */
162 #if ZALLOC_ZONE_LIMIT == 16384
163 #define NZONES 72
164 #elif ZALLOC_ZONE_LIMIT == 32768
165 #define NZONES 80
166 #else
167 #error "I couldn't figure out NZONES"
168 #endif
171 * Chunk structure for free elements
173 typedef struct slchunk {
174 struct slchunk *c_Next;
175 } *slchunk_t;
178 * The IN-BAND zone header is placed at the beginning of each zone.
180 struct slglobaldata;
182 typedef struct slzone {
183 int32_t z_Magic; /* magic number for sanity check */
184 int z_NFree; /* total free chunks / ualloc space */
185 struct slzone *z_Next; /* ZoneAry[] link if z_NFree non-zero */
186 int z_NMax; /* maximum free chunks */
187 char *z_BasePtr; /* pointer to start of chunk array */
188 int z_UIndex; /* current initial allocation index */
189 int z_UEndIndex; /* last (first) allocation index */
190 int z_ChunkSize; /* chunk size for validation */
191 int z_FirstFreePg; /* chunk list on a page-by-page basis */
192 int z_ZoneIndex;
193 int z_Flags;
194 struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
195 } *slzone_t;
197 typedef struct slglobaldata {
198 spinlock_t Spinlock;
199 slzone_t ZoneAry[NZONES];/* linked list of zones NFree > 0 */
200 } *slglobaldata_t;
202 #define SLZF_UNOTZEROD 0x0001
204 #define FASTSLABREALLOC 0x02
207 * Misc constants. Note that allocations that are exact multiples of
208 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
209 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
211 #define MIN_CHUNK_SIZE 8 /* in bytes */
212 #define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
213 #define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
216 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
217 * not be larger than 64.
219 #define BIGHSHIFT 10 /* bigalloc hash table */
220 #define BIGHSIZE (1 << BIGHSHIFT)
221 #define BIGHMASK (BIGHSIZE - 1)
222 #define BIGXSIZE (BIGHSIZE / 16) /* bigalloc lock table */
223 #define BIGXMASK (BIGXSIZE - 1)
226 * BIGCACHE caches oversized allocations. Note that a linear search is
227 * performed, so do not make the cache too large.
229 * BIGCACHE will garbage-collect excess space when the excess exceeds the
230 * specified value. A relatively large number should be used here because
231 * garbage collection is expensive.
233 #define BIGCACHE 16
234 #define BIGCACHE_MASK (BIGCACHE - 1)
235 #define BIGCACHE_LIMIT (1024 * 1024) /* size limit */
236 #define BIGCACHE_EXCESS (16 * 1024 * 1024) /* garbage collect */
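/*
 * Maximum number of chunks _slaballoc() will batch into the calling
 * thread's loaded magazine in one pass while holding the global slab lock.
 */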
238 #define CACHE_CHUNKS 32
240 #define SAFLAG_ZERO 0x0001
241 #define SAFLAG_PASSIVE 0x0002
242 #define SAFLAG_MAGS 0x0004
245 * Thread control
248 #define arysize(ary) (sizeof(ary)/sizeof((ary)[0]))
251 * The assertion macros try to pretty-print assertion failures
252 * which can be caused by corruption. If a lock is held, we
253 * provide a macro that attempts to release it before asserting
254 * in order to prevent (e.g.) a reentrant SIGABRT calling malloc
255 * and deadlocking, resulting in the program freezing up.
257 #define MASSERT(exp) \
258 do { if (__predict_false(!(exp))) \
259 _mpanic("assertion: %s in %s", \
260 #exp, __func__); \
261 } while (0)
263 #define MASSERT_WTHUNLK(exp, unlk) \
264 do { if (__predict_false(!(exp))) { \
265 unlk; \
266 _mpanic("assertion: %s in %s", \
267 #exp, __func__); \
269 } while (0)
272 * Magazines, arranged so the structure is roughly 4KB.
274 #define M_MAX_ROUNDS (512 - 3)
275 #define M_MIN_ROUNDS 16
276 #define M_ZONE_INIT_ROUNDS 64
277 #define M_ZONE_HYSTERESIS 32
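/*
 * On LP64 the (512 - 3) object pointers plus the list link and four ints
 * make struct magazine exactly 4096 bytes.
 */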
279 struct magazine {
280 SLIST_ENTRY(magazine) nextmagazine;
282 int flags;
283 int capacity; /* Max rounds in this magazine */
284 int rounds; /* Current number of free rounds */
285 int unused01;
286 void *objects[M_MAX_ROUNDS];
289 SLIST_HEAD(magazinelist, magazine);
291 static spinlock_t zone_mag_lock;
292 static spinlock_t depot_spinlock;
293 static struct magazine zone_magazine = {
294 .flags = 0,
295 .capacity = M_ZONE_INIT_ROUNDS,
296 .rounds = 0,
299 #define MAGAZINE_FULL(mp) (mp->rounds == mp->capacity)
300 #define MAGAZINE_NOTFULL(mp) (mp->rounds < mp->capacity)
301 #define MAGAZINE_EMPTY(mp) (mp->rounds == 0)
302 #define MAGAZINE_NOTEMPTY(mp) (mp->rounds != 0)
305 * Each thread will have a pair of magazines per size-class (NZONES).
306 * The loaded magazine will support immediate allocations, the previous
307 * magazine will either be full or empty and can be swapped in as needed
309 typedef struct magazine_pair {
310 struct magazine *loaded;
311 struct magazine *prev;
312 } magazine_pair;
314 /* A depot is a collection of magazines for a single zone. */
315 typedef struct magazine_depot {
316 struct magazinelist full;
317 struct magazinelist empty;
318 spinlock_t lock;
319 } magazine_depot;
321 typedef struct thr_mags {
322 magazine_pair mags[NZONES];
323 struct magazine *newmag;
324 int init;
325 } thr_mags;
327 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
328 static pthread_key_t thread_mags_key;
329 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
330 static magazine_depot depots[NZONES];
333 * Fixed globals (not per-cpu)
335 static const int ZoneSize = ZALLOC_ZONE_SIZE;
336 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
337 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
338 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
340 static int opt_madvise = 0;
341 static int opt_utrace = 0;
342 static int g_malloc_flags = 0;
343 static struct slglobaldata SLGlobalData;
344 static bigalloc_t bigalloc_array[BIGHSIZE];
345 static spinlock_t bigspin_array[BIGXSIZE];
346 static volatile void *bigcache_array[BIGCACHE]; /* atomic swap */
347 static volatile size_t bigcache_size_array[BIGCACHE]; /* SMP races ok */
348 static volatile int bigcache_index; /* SMP races ok */
349 static int malloc_panic;
350 static size_t excess_alloc; /* excess big allocs */
352 static void *_slaballoc(size_t size, int flags);
353 static void *_slabrealloc(void *ptr, size_t size);
354 static void _slabfree(void *ptr, int, bigalloc_t *);
355 static int _slabmemalign(void **memptr, size_t alignment, size_t size);
356 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
357 static void _vmem_free(void *ptr, size_t bytes);
358 static void *magazine_alloc(struct magazine *);
359 static int magazine_free(struct magazine *, void *);
360 static void *mtmagazine_alloc(int zi, int flags);
361 static int mtmagazine_free(int zi, void *);
362 static void mtmagazine_init(void);
363 static void mtmagazine_destructor(void *);
364 static slzone_t zone_alloc(int flags);
365 static void zone_free(void *z);
366 static void _mpanic(const char *ctl, ...) __printflike(1, 2);
367 static void malloc_init(void) __constructor(101);
369 struct nmalloc_utrace {
370 void *p;
371 size_t s;
372 void *r;
375 #define UTRACE(a, b, c) \
376 if (opt_utrace) { \
377 struct nmalloc_utrace ut = { \
378 .p = (a), \
379 .s = (b), \
380 .r = (c) \
381 }; \
382 utrace(&ut, sizeof(ut)); \
385 static void
386 malloc_init(void)
388 const char *p = NULL;
390 if (issetugid() == 0)
391 p = getenv("MALLOC_OPTIONS");
393 for (; p != NULL && *p != '\0'; p++) {
394 switch(*p) {
395 case 'u': opt_utrace = 0; break;
396 case 'U': opt_utrace = 1; break;
397 case 'h': opt_madvise = 0; break;
398 case 'H': opt_madvise = 1; break;
399 case 'z': g_malloc_flags = 0; break;
400 case 'Z': g_malloc_flags = SAFLAG_ZERO; break;
401 default:
402 break;
406 UTRACE((void *) -1, 0, NULL);
410 * We have to install a handler for nmalloc thread teardowns when
411 * the thread is created. We cannot delay this because destructors in
412 * sophisticated userland programs can call malloc() for the first time
413 * during their thread exit.
415 * This routine is called directly from pthreads.
417 void
418 _nmalloc_thr_init(void)
420 thr_mags *tp;
423 * Disallow mtmagazine operations until the mtmagazine is
424 * initialized.
426 tp = &thread_mags;
427 tp->init = -1;
429 _pthread_once(&thread_mags_once, mtmagazine_init);
430 _pthread_setspecific(thread_mags_key, tp);
431 tp->init = 1;
434 void
435 _nmalloc_thr_prepfork(void)
437 if (__isthreaded) {
438 _SPINLOCK(&zone_mag_lock);
439 _SPINLOCK(&depot_spinlock);
443 void
444 _nmalloc_thr_parentfork(void)
446 if (__isthreaded) {
447 _SPINUNLOCK(&depot_spinlock);
448 _SPINUNLOCK(&zone_mag_lock);
452 void
453 _nmalloc_thr_childfork(void)
455 if (__isthreaded) {
456 _SPINUNLOCK(&depot_spinlock);
457 _SPINUNLOCK(&zone_mag_lock);
462 * Handle signal reentrancy safely whether we are threaded or not.
463 * This improves the stability for mono and will probably improve
464 * stability for other high-level languages which are becoming increasingly
465 * sophisticated.
467 * The sigblockall()/sigunblockall() implementation uses a counter on
468 * a per-thread shared user/kernel page, avoids system calls, and is thus
469 * very fast.
471 static __inline void
472 nmalloc_sigblockall(void)
474 sigblockall();
477 static __inline void
478 nmalloc_sigunblockall(void)
480 sigunblockall();
484 * Thread locks.
486 static __inline void
487 slgd_lock(slglobaldata_t slgd)
489 if (__isthreaded)
490 _SPINLOCK(&slgd->Spinlock);
493 static __inline void
494 slgd_unlock(slglobaldata_t slgd)
496 if (__isthreaded)
497 _SPINUNLOCK(&slgd->Spinlock);
500 static __inline void
501 depot_lock(magazine_depot *dp __unused)
503 if (__isthreaded)
504 _SPINLOCK(&depot_spinlock);
507 static __inline void
508 depot_unlock(magazine_depot *dp __unused)
510 if (__isthreaded)
511 _SPINUNLOCK(&depot_spinlock);
514 static __inline void
515 zone_magazine_lock(void)
517 if (__isthreaded)
518 _SPINLOCK(&zone_mag_lock);
521 static __inline void
522 zone_magazine_unlock(void)
524 if (__isthreaded)
525 _SPINUNLOCK(&zone_mag_lock);
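/*
 * Exchange the loaded and previous magazines for a size class.
 */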
528 static __inline void
529 swap_mags(magazine_pair *mp)
531 struct magazine *tmp;
532 tmp = mp->loaded;
533 mp->loaded = mp->prev;
534 mp->prev = tmp;
538 * bigalloc hashing and locking support.
540 * Return an unmasked hash code for the passed pointer.
542 static __inline int
543 _bigalloc_hash(void *ptr)
545 int hv;
547 hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
548 ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
550 return(hv);
554 * Lock the hash chain and return a pointer to its base for the specified
555 * address.
557 static __inline bigalloc_t *
558 bigalloc_lock(void *ptr)
560 int hv = _bigalloc_hash(ptr);
561 bigalloc_t *bigp;
563 bigp = &bigalloc_array[hv & BIGHMASK];
564 if (__isthreaded)
565 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
566 return(bigp);
570 * Lock the hash chain and return a pointer to its base for the specified
571 * address.
573 * BUT, if the hash chain is empty, just return NULL and do not bother
574 * to lock anything.
576 static __inline bigalloc_t *
577 bigalloc_check_and_lock(void *ptr)
579 int hv = _bigalloc_hash(ptr);
580 bigalloc_t *bigp;
582 bigp = &bigalloc_array[hv & BIGHMASK];
583 if (*bigp == NULL)
584 return(NULL);
585 if (__isthreaded) {
586 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
588 return(bigp);
591 static __inline void
592 bigalloc_unlock(void *ptr)
594 int hv;
596 if (__isthreaded) {
597 hv = _bigalloc_hash(ptr);
598 _SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
603 * Find a bigcache entry that might work for the allocation. SMP races are
604 * ok here except for the swap (that is, it is ok if bigcache_size_array[i]
605 * is wrong or if a NULL or too-small big is returned).
607 * Generally speaking it is ok to find a large entry even if the bytes
608 * requested are relatively small (but still oversized), because we really
609 * don't know *what* the application is going to do with the buffer.
611 static __inline
612 bigalloc_t
613 bigcache_find_alloc(size_t bytes)
615 bigalloc_t big = NULL;
616 size_t test;
617 int i;
619 for (i = 0; i < BIGCACHE; ++i) {
620 test = bigcache_size_array[i];
621 if (bytes <= test) {
622 bigcache_size_array[i] = 0;
623 big = atomic_swap_ptr(&bigcache_array[i], NULL);
624 break;
627 return big;
631 * Free a bigcache entry, possibly returning one that the caller really must
632 * free. This is used to cache recent oversized memory blocks. Only
633 * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
634 * to collect the biggest ones we can that are under the limit.
636 static __inline
637 bigalloc_t
638 bigcache_find_free(bigalloc_t big)
640 int i;
641 int j;
642 int b;
644 b = ++bigcache_index;
645 for (i = 0; i < BIGCACHE; ++i) {
646 j = (b + i) & BIGCACHE_MASK;
647 if (bigcache_size_array[j] < big->bytes) {
648 bigcache_size_array[j] = big->bytes;
649 big = atomic_swap_ptr(&bigcache_array[j], big);
650 break;
653 return big;
656 static __inline
657 void
658 handle_excess_big(void)
660 int i;
661 bigalloc_t big;
662 bigalloc_t *bigp;
664 if (excess_alloc <= BIGCACHE_EXCESS)
665 return;
667 for (i = 0; i < BIGHSIZE; ++i) {
668 bigp = &bigalloc_array[i];
669 if (*bigp == NULL)
670 continue;
671 if (__isthreaded)
672 _SPINLOCK(&bigspin_array[i & BIGXMASK]);
673 for (big = *bigp; big; big = big->next) {
674 if (big->active < big->bytes) {
675 MASSERT_WTHUNLK((big->active & PAGE_MASK) == 0,
676 _SPINUNLOCK(&bigspin_array[i & BIGXMASK]));
677 MASSERT_WTHUNLK((big->bytes & PAGE_MASK) == 0,
678 _SPINUNLOCK(&bigspin_array[i & BIGXMASK]));
679 munmap((char *)big->base + big->active,
680 big->bytes - big->active);
681 atomic_add_long(&excess_alloc,
682 big->active - big->bytes);
683 big->bytes = big->active;
686 if (__isthreaded)
687 _SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
692 * Calculate the zone index for the allocation request size and set the
693 * allocation request size to that particular zone's chunk size.
695 static __inline int
696 zoneindex(size_t *bytes, size_t *chunking)
698 size_t n = (unsigned int)*bytes; /* unsigned for shift opt */
701 * This used to be 8-byte chunks and 16 zones for n < 128.
702 * However some instructions may require 16-byte alignment
703 * (aka SIMD) and programs might not request an aligned size
704 * (aka GCC-7), so change this as follows:
706 * 0-15 bytes 8-byte alignment in two zones (0-1)
707 * 16-127 bytes 16-byte alignment in eight zones (3-10)
708 * zone index 2 and 11-15 are currently unused.
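 *	e.g. a 100-byte request is rounded up to 112 with 16-byte chunking
 *	and maps to zone index 112 / 16 + 2 = 9.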
710 if (n < 16) {
711 *bytes = n = (n + 7) & ~7;
712 *chunking = 8;
713 return(n / 8 - 1); /* 8 byte chunks, 2 zones */
714 /* zones 0,1, zone 2 is unused */
716 if (n < 128) {
717 *bytes = n = (n + 15) & ~15;
718 *chunking = 16;
719 return(n / 16 + 2); /* 16 byte chunks, 8 zones */
720 /* zones 3-10, zones 11-15 unused */
722 if (n < 256) {
723 *bytes = n = (n + 15) & ~15;
724 *chunking = 16;
725 return(n / 16 + 7);
727 if (n < 8192) {
728 if (n < 512) {
729 *bytes = n = (n + 31) & ~31;
730 *chunking = 32;
731 return(n / 32 + 15);
733 if (n < 1024) {
734 *bytes = n = (n + 63) & ~63;
735 *chunking = 64;
736 return(n / 64 + 23);
738 if (n < 2048) {
739 *bytes = n = (n + 127) & ~127;
740 *chunking = 128;
741 return(n / 128 + 31);
743 if (n < 4096) {
744 *bytes = n = (n + 255) & ~255;
745 *chunking = 256;
746 return(n / 256 + 39);
748 *bytes = n = (n + 511) & ~511;
749 *chunking = 512;
750 return(n / 512 + 47);
752 #if ZALLOC_ZONE_LIMIT > 8192
753 if (n < 16384) {
754 *bytes = n = (n + 1023) & ~1023;
755 *chunking = 1024;
756 return(n / 1024 + 55);
758 #endif
759 #if ZALLOC_ZONE_LIMIT > 16384
760 if (n < 32768) {
761 *bytes = n = (n + 2047) & ~2047;
762 *chunking = 2048;
763 return(n / 2048 + 63);
765 #endif
766 _mpanic("Unexpected byte count %zu", n);
767 return(0);
771 * We want large magazines for small allocations
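 * The capacity scales linearly from M_MAX_ROUNDS for zone 0 down to
 * roughly M_MIN_ROUNDS for the largest zone index.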
773 static __inline int
774 zonecapacity(int zi)
776 int cap;
778 cap = (NZONES - zi) * (M_MAX_ROUNDS - M_MIN_ROUNDS) / NZONES +
779 M_MIN_ROUNDS;
781 return cap;
785 * malloc() - call internal slab allocator
787 void *
788 __malloc(size_t size)
790 void *ptr;
792 nmalloc_sigblockall();
793 ptr = _slaballoc(size, 0);
794 if (ptr == NULL)
795 errno = ENOMEM;
796 else
797 UTRACE(0, size, ptr);
798 nmalloc_sigunblockall();
800 return(ptr);
803 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
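/*
 * If both operands are below 2^(sizeof(size_t) * 4) their product cannot
 * overflow a size_t, so __calloc() only runs the division-based overflow
 * test when at least one operand is large.
 */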
806 * calloc() - call internal slab allocator
808 void *
809 __calloc(size_t number, size_t size)
811 void *ptr;
813 if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
814 number > 0 && SIZE_MAX / number < size) {
815 errno = ENOMEM;
816 return(NULL);
819 nmalloc_sigblockall();
820 ptr = _slaballoc(number * size, SAFLAG_ZERO);
821 if (ptr == NULL)
822 errno = ENOMEM;
823 else
824 UTRACE(0, number * size, ptr);
825 nmalloc_sigunblockall();
827 return(ptr);
831 * realloc() (SLAB ALLOCATOR)
833 * We do not attempt to optimize this routine beyond reusing the same
834 * pointer if the new size fits within the chunking of the old pointer's
835 * zone.
837 void *
838 __realloc(void *ptr, size_t size)
840 void *ret;
842 nmalloc_sigblockall();
843 ret = _slabrealloc(ptr, size);
844 if (ret == NULL)
845 errno = ENOMEM;
846 else
847 UTRACE(ptr, size, ret);
848 nmalloc_sigunblockall();
850 return(ret);
854 * aligned_alloc()
856 * Allocate (size) bytes with an alignment of (alignment).
858 void *
859 __aligned_alloc(size_t alignment, size_t size)
861 void *ptr;
862 int rc;
864 nmalloc_sigblockall();
865 ptr = NULL;
866 rc = _slabmemalign(&ptr, alignment, size);
867 if (rc)
868 errno = rc;
869 nmalloc_sigunblockall();
871 return (ptr);
875 * posix_memalign()
877 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
878 * is a power of 2 >= sizeof(void *).
881 __posix_memalign(void **memptr, size_t alignment, size_t size)
883 int rc;
886 * OpenGroup spec issue 6 check
888 if (alignment < sizeof(void *)) {
889 *memptr = NULL;
890 return(EINVAL);
893 nmalloc_sigblockall();
894 rc = _slabmemalign(memptr, alignment, size);
895 nmalloc_sigunblockall();
897 return (rc);
901 * The slab allocator will allocate on power-of-2 boundaries up to
902 * at least PAGE_SIZE. We use the zoneindex mechanic to find a
903 * zone matching the requirements, and _vmem_alloc() otherwise.
905 static int
906 _slabmemalign(void **memptr, size_t alignment, size_t size)
908 bigalloc_t *bigp;
909 bigalloc_t big;
910 size_t chunking;
911 int zi __unused;
913 if (alignment < 1) {
914 *memptr = NULL;
915 return(EINVAL);
919 * OpenGroup spec issue 6 checks
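 * ((x | (x - 1)) + 1 == (x << 1) only holds when x is a power of 2,
 *  so this rejects any alignment that is not a power of 2)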
921 if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
922 *memptr = NULL;
923 return(EINVAL);
927 * Our zone mechanism guarantees same-sized alignment for any
928 * power-of-2 allocation. If size is a power-of-2 and reasonable
929 * we can just call _slaballoc() and be done. We round size up
930 * to the nearest alignment boundary to improve our odds of
931 * it becoming a power-of-2 if it wasn't before.
933 if (size <= alignment)
934 size = alignment;
935 else
936 size = (size + alignment - 1) & ~(size_t)(alignment - 1);
939 * If we have overflowed above when rounding to the nearest alignment
940 * boundary, just return ENOMEM, size should be == N * sizeof(void *).
942 * Power-of-2 allocations up to 8KB will be aligned to the allocation
943 * size and _slaballoc() can simply be used. Please see the 'Align the
944 * storage in the zone based on the chunking' comment in _slaballoc(),
945 * which has a special case for powers of 2.
947 if (size == 0)
948 return(ENOMEM);
950 if (size <= MAX_SLAB_PAGEALIGN &&
951 (size | (size - 1)) + 1 == (size << 1)) {
952 *memptr = _slaballoc(size, 0);
953 return(*memptr ? 0 : ENOMEM);
957 * Otherwise locate a zone with a chunking that matches
958 * the requested alignment, within reason. Consider two cases:
960 * (1) A 1K allocation on a 32-byte alignment. The first zoneindex
961 * we find will be the best fit because the chunking will be
962 * greater or equal to the alignment.
964 * (2) A 513 allocation on a 256-byte alignment. In this case
965 * the first zoneindex we find will be for 576 byte allocations
966 * with a chunking of 64, which is not sufficient. To fix this
967 * we simply find the nearest power-of-2 >= size and use the
968 * same side-effect of _slaballoc() which guarantees
969 * same-alignment on a power-of-2 allocation.
971 if (size < PAGE_SIZE) {
972 zi = zoneindex(&size, &chunking);
973 if (chunking >= alignment) {
974 *memptr = _slaballoc(size, 0);
975 return(*memptr ? 0 : ENOMEM);
977 if (size >= 1024)
978 alignment = 1024;
979 if (size >= 16384)
980 alignment = 16384;
981 while (alignment < size)
982 alignment <<= 1;
983 *memptr = _slaballoc(alignment, 0);
984 return(*memptr ? 0 : ENOMEM);
988 * If the slab allocator cannot handle it, use _vmem_alloc().
990 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
992 if (alignment < PAGE_SIZE)
993 alignment = PAGE_SIZE;
994 if (size < alignment)
995 size = alignment;
996 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
997 if (alignment == PAGE_SIZE && size <= BIGCACHE_LIMIT) {
998 big = bigcache_find_alloc(size);
999 if (big && big->bytes < size) {
1000 _slabfree(big->base, FASTSLABREALLOC, &big);
1001 big = NULL;
1003 if (big) {
1004 *memptr = big->base;
1005 big->active = size;
1006 if (big->active < big->bytes) {
1007 atomic_add_long(&excess_alloc,
1008 big->bytes - big->active);
1010 bigp = bigalloc_lock(*memptr);
1011 big->next = *bigp;
1012 *bigp = big;
1013 bigalloc_unlock(*memptr);
1014 handle_excess_big();
1015 return(0);
1018 *memptr = _vmem_alloc(size, alignment, 0);
1019 if (*memptr == NULL)
1020 return(ENOMEM);
1022 big = _slaballoc(sizeof(struct bigalloc), 0);
1023 if (big == NULL) {
1024 _vmem_free(*memptr, size);
1025 *memptr = NULL;
1026 return(ENOMEM);
1028 bigp = bigalloc_lock(*memptr);
1029 big->base = *memptr;
1030 big->active = size;
1031 big->bytes = size; /* no excess */
1032 big->next = *bigp;
1033 *bigp = big;
1034 bigalloc_unlock(*memptr);
1036 return(0);
1040 * free() (SLAB ALLOCATOR) - do the obvious
1042 void
1043 __free(void *ptr)
1045 UTRACE(ptr, 0, 0);
1047 nmalloc_sigblockall();
1048 _slabfree(ptr, 0, NULL);
1049 nmalloc_sigunblockall();
1053 * _slaballoc() (SLAB ALLOCATOR)
1055 * Allocate memory via the slab allocator. If the request is too large,
1056 * or if it is page-aligned beyond a certain size, we fall back to the
1057 * KMEM subsystem.
1059 static void *
1060 _slaballoc(size_t size, int flags)
1062 slzone_t z;
1063 slchunk_t chunk;
1064 slglobaldata_t slgd;
1065 size_t chunking;
1066 thr_mags *tp;
1067 struct magazine *mp;
1068 int count;
1069 int zi;
1070 int off;
1071 void *obj;
1074 * Handle the degenerate size == 0 case. Yes, this does happen.
1075 * Return a special pointer. This is to maintain compatibility with
1076 * the original malloc implementation. Certain devices, such as the
1077 * adaptec driver, not only allocate 0 bytes, they check for NULL and
1078 * also realloc() later on. Joy.
1080 if (size == 0)
1081 size = 1;
1083 /* Capture global flags */
1084 flags |= g_malloc_flags;
1087 * Handle large allocations directly, with a separate bigmem cache.
1089 * The backend allocator is pretty nasty on a SMP system. Use the
1090 * slab allocator for one and two page-sized chunks even though we
1091 * lose some efficiency.
1093 * NOTE: Please see _slabmemalign(), which assumes that power-of-2
1094 * allocations up to and including MAX_SLAB_PAGEALIGN
1095 * can use _slaballoc() and be aligned to the same. The
1096 * zone cache can be used for this case, bigalloc does not
1097 * have to be used.
1099 if (size >= ZoneLimit ||
1100 ((size & PAGE_MASK) == 0 && size > MAX_SLAB_PAGEALIGN)) {
1101 bigalloc_t big;
1102 bigalloc_t *bigp;
1105 * Page-align and cache-color in case of virtually indexed
1106 * physically tagged L1 caches (aka SandyBridge). No sweat
1107 * otherwise, so just do it.
1109 * (don't count as excess).
1111 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1114 * If we have overflowed above when rounding to the page
1115 * boundary, something has passed us (size_t)[-PAGE_MASK..-1]
1116 * so just return NULL, size at this point should be >= 0.
1118 if (size == 0)
1119 return (NULL);
1122 * Force an additional page offset for 8KB-aligned requests
1123 * (i.e. 8KB, 16KB, etc) that helps spread data across the
1124 * CPU caches at the cost of some dead space in the memory
1125 * map.
1127 if ((size & (PAGE_SIZE * 2 - 1)) == 0)
1128 size += PAGE_SIZE;
1131 * Try to reuse a cached big block to avoid mmap'ing. If it
1132 * turns out not to fit our requirements we throw it away
1133 * and allocate normally.
1135 big = NULL;
1136 if (size <= BIGCACHE_LIMIT) {
1137 big = bigcache_find_alloc(size);
1138 if (big && big->bytes < size) {
1139 _slabfree(big->base, FASTSLABREALLOC, &big);
1140 big = NULL;
1143 if (big) {
1144 chunk = big->base;
1145 if (flags & SAFLAG_ZERO)
1146 bzero(chunk, size);
1147 } else {
1148 chunk = _vmem_alloc(size, PAGE_SIZE, flags);
1149 if (chunk == NULL)
1150 return(NULL);
1152 big = _slaballoc(sizeof(struct bigalloc), 0);
1153 if (big == NULL) {
1154 _vmem_free(chunk, size);
1155 return(NULL);
1157 big->base = chunk;
1158 big->bytes = size;
1160 big->active = size;
1162 bigp = bigalloc_lock(chunk);
1163 if (big->active < big->bytes) {
1164 atomic_add_long(&excess_alloc,
1165 big->bytes - big->active);
1167 big->next = *bigp;
1168 *bigp = big;
1169 bigalloc_unlock(chunk);
1170 handle_excess_big();
1172 return(chunk);
1175 /* Compute allocation zone; zoneindex will panic on excessive sizes */
1176 zi = zoneindex(&size, &chunking);
1177 MASSERT(zi < NZONES);
1179 obj = mtmagazine_alloc(zi, flags);
1180 if (obj != NULL) {
1181 if (flags & SAFLAG_ZERO)
1182 bzero(obj, size);
1183 return (obj);
1187 * Attempt to allocate out of an existing global zone. If all zones
1188 * are exhausted pull one off the free list or allocate a new one.
1190 slgd = &SLGlobalData;
1192 again:
1193 if (slgd->ZoneAry[zi] == NULL) {
1194 z = zone_alloc(flags);
1195 if (z == NULL)
1196 goto fail;
1199 * How big is the base structure?
1201 off = sizeof(struct slzone);
1204 * Align the storage in the zone based on the chunking.
1206 * Guarantee power-of-2 alignment for power-of-2-sized
1207 * chunks. Otherwise align based on the chunking size
1208 * (typically 8 or 16 bytes for small allocations).
1210 * NOTE: Allocations >= ZoneLimit are governed by the
1211 * bigalloc code and typically only guarantee page-alignment.
1213 * Set initial conditions for UIndex near the zone header
1214 * to reduce unnecessary page faults, vs semi-randomization
1215 * to improve L1 cache saturation.
1217 * NOTE: Please see _slabmemalign(), which assumes that
1218 * power-of-2 allocations up to and including
1219 * MAX_SLAB_PAGEALIGN can use _slaballoc()
1220 * and be aligned to the same. The zone cache can be
1221 * used for this case, bigalloc does not have to be
1222 * used.
1224 * ALL power-of-2 requests that fall through to this
1225 * code use this rule (conditionals above limit this
1226 * to <= MAX_SLAB_PAGEALIGN).
1228 if ((size | (size - 1)) + 1 == (size << 1))
1229 off = roundup2(off, size);
1230 else
1231 off = roundup2(off, chunking);
1232 z->z_Magic = ZALLOC_SLAB_MAGIC;
1233 z->z_ZoneIndex = zi;
1234 z->z_NMax = (ZoneSize - off) / size;
1235 z->z_NFree = z->z_NMax;
1236 z->z_BasePtr = (char *)z + off;
1237 z->z_UIndex = z->z_UEndIndex = 0;
1238 z->z_ChunkSize = size;
1239 z->z_FirstFreePg = ZonePageCount;
1240 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1241 flags &= ~SAFLAG_ZERO; /* already zero'd */
1242 flags |= SAFLAG_PASSIVE;
1246 * Slide the base index for initial allocations out of the
1247 * next zone we create so we do not over-weight the lower
1248 * part of the cpu memory caches.
1250 slgd_lock(slgd);
1251 z->z_Next = slgd->ZoneAry[zi];
1252 slgd->ZoneAry[zi] = z;
1253 } else {
1254 slgd_lock(slgd);
1255 z = slgd->ZoneAry[zi];
1256 if (z == NULL) {
1257 slgd_unlock(slgd);
1258 goto again;
1263 * Ok, we have a zone from which at least one chunk is available.
1265 MASSERT_WTHUNLK(z->z_NFree > 0, slgd_unlock(slgd));
1268 * Try to cache <count> chunks, up to CACHE_CHUNKS (32 typ)
1269 * to avoid unnecessary global lock contention.
1271 tp = &thread_mags;
1272 mp = tp->mags[zi].loaded;
1273 count = 0;
1274 if (mp && tp->init >= 0) {
1275 count = mp->capacity - mp->rounds;
1276 if (count >= z->z_NFree)
1277 count = z->z_NFree - 1;
1278 if (count > CACHE_CHUNKS)
1279 count = CACHE_CHUNKS;
1283 * Locate a chunk in a free page. This attempts to localize
1284 * reallocations into earlier pages without us having to sort
1285 * the chunk list. A chunk may still overlap a page boundary.
1287 while (z->z_FirstFreePg < ZonePageCount) {
1288 if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
1289 if (((uintptr_t)chunk & ZoneMask) == 0) {
1290 slgd_unlock(slgd);
1291 _mpanic("assertion: corrupt malloc zone");
1293 z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
1294 --z->z_NFree;
1296 if (count == 0)
1297 goto done;
1298 mp->objects[mp->rounds++] = chunk;
1299 --count;
1300 continue;
1302 ++z->z_FirstFreePg;
1306 * No chunks are available but NFree said we had some memory,
1307 * so it must be available in the never-before-used-memory
1308 * area governed by UIndex. The consequences are very
1309 * serious if our zone got corrupted so we use an explicit
1310 * panic rather than a KASSERT.
1312 for (;;) {
1313 chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
1314 --z->z_NFree;
1315 if (++z->z_UIndex == z->z_NMax)
1316 z->z_UIndex = 0;
1317 if (z->z_UIndex == z->z_UEndIndex) {
1318 if (z->z_NFree != 0) {
1319 slgd_unlock(slgd);
1320 _mpanic("slaballoc: corrupted zone");
1323 if (count == 0)
1324 break;
1325 mp->objects[mp->rounds++] = chunk;
1326 --count;
1329 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1330 flags &= ~SAFLAG_ZERO;
1331 flags |= SAFLAG_PASSIVE;
1334 done:
1336 * Remove us from the ZoneAry[] when we become empty
1338 if (z->z_NFree == 0) {
1339 slgd->ZoneAry[zi] = z->z_Next;
1340 z->z_Next = NULL;
1342 slgd_unlock(slgd);
1343 if (flags & SAFLAG_ZERO)
1344 bzero(chunk, size);
1346 return(chunk);
1347 fail:
1348 return(NULL);
1352 * Reallocate memory within the chunk
1354 static void *
1355 _slabrealloc(void *ptr, size_t size)
1357 bigalloc_t *bigp;
1358 void *nptr;
1359 slzone_t z;
1360 size_t chunking;
1362 if (ptr == NULL) {
1363 return(_slaballoc(size, 0));
1366 if (size == 0)
1367 size = 1;
1370 * Handle oversized allocations.
1372 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1373 bigalloc_t big;
1374 size_t bigbytes;
1376 while ((big = *bigp) != NULL) {
1377 if (big->base == ptr) {
1378 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1379 bigbytes = big->bytes;
1382 * If it already fits determine if it makes
1383 * sense to shrink/reallocate. Try to optimize
1384 * programs which stupidly make incremental
1385 * reallocations larger or smaller by scaling
1386 * the allocation. Also deal with potential
1387 * coloring.
1389 if (size >= (bigbytes >> 1) &&
1390 size <= bigbytes) {
1391 if (big->active != size) {
1392 atomic_add_long(&excess_alloc,
1393 big->active -
1394 size);
1396 big->active = size;
1397 bigalloc_unlock(ptr);
1398 return(ptr);
1402 * For large reallocations, allocate more space
1403 * than we need to try to avoid excessive
1404 * reallocations later on.
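 * (reserve size + size / 8, i.e. roughly 12.5% of slack, rounded up
 *  to a whole page)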
1406 chunking = size + (size >> 3);
1407 chunking = (chunking + PAGE_MASK) &
1408 ~(size_t)PAGE_MASK;
1411 * Try to allocate adjacently in case the
1412 * program is idiotically realloc()ing a
1413 * huge memory block just slightly bigger.
1414 * (llvm's llc tends to do this a lot).
1416 * (MAP_TRYFIXED forces mmap to fail if there
1417 * is already something at the address).
1419 if (chunking > bigbytes) {
1420 char *addr;
1421 int errno_save = errno;
1423 addr = mmap((char *)ptr + bigbytes,
1424 chunking - bigbytes,
1425 PROT_READ|PROT_WRITE,
1426 MAP_PRIVATE|MAP_ANON|
1427 MAP_TRYFIXED,
1428 -1, 0);
1429 errno = errno_save;
1430 if (addr == (char *)ptr + bigbytes) {
1431 atomic_add_long(&excess_alloc,
1432 big->active -
1433 big->bytes +
1434 chunking -
1435 size);
1436 big->bytes = chunking;
1437 big->active = size;
1438 bigalloc_unlock(ptr);
1440 return(ptr);
1442 MASSERT_WTHUNLK(
1443 (void *)addr == MAP_FAILED,
1444 bigalloc_unlock(ptr));
1448 * Failed, unlink big and allocate fresh.
1449 * (note that we have to leave (big) intact
1450 * in case the slaballoc fails).
1452 *bigp = big->next;
1453 bigalloc_unlock(ptr);
1454 if ((nptr = _slaballoc(size, 0)) == NULL) {
1455 /* Relink block */
1456 bigp = bigalloc_lock(ptr);
1457 big->next = *bigp;
1458 *bigp = big;
1459 bigalloc_unlock(ptr);
1460 return(NULL);
1462 if (size > bigbytes)
1463 size = bigbytes;
1464 bcopy(ptr, nptr, size);
1465 atomic_add_long(&excess_alloc, big->active -
1466 big->bytes);
1467 _slabfree(ptr, FASTSLABREALLOC, &big);
1469 return(nptr);
1471 bigp = &big->next;
1473 bigalloc_unlock(ptr);
1474 handle_excess_big();
1478 * Get the original allocation's zone. If the new request winds
1479 * up using the same chunk size we do not have to do anything.
1481 * NOTE: We don't have to lock the globaldata here, the fields we
1482 * access here will not change at least as long as we have control
1483 * over the allocation.
1485 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1486 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1489 * Use zoneindex() to chunk-align the new size, as long as the
1490 * new size is not too large.
1492 if (size < ZoneLimit) {
1493 zoneindex(&size, &chunking);
1494 if (z->z_ChunkSize == size) {
1495 return(ptr);
1500 * Allocate memory for the new request size and copy as appropriate.
1502 if ((nptr = _slaballoc(size, 0)) != NULL) {
1503 if (size > z->z_ChunkSize)
1504 size = z->z_ChunkSize;
1505 bcopy(ptr, nptr, size);
1506 _slabfree(ptr, 0, NULL);
1509 return(nptr);
1513 * free (SLAB ALLOCATOR)
1515 * Free a memory block previously allocated by malloc. Note that we do not
1516 * attempt to update ks_loosememuse as MP races could prevent us from
1517 * checking memory limits in malloc.
1519 * flags:
1520 * FASTSLABREALLOC Fast call from realloc, *rbigp already
1521 * unlinked.
1523 * MPSAFE
1525 static void
1526 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1528 slzone_t z;
1529 slchunk_t chunk;
1530 bigalloc_t big;
1531 bigalloc_t *bigp;
1532 slglobaldata_t slgd;
1533 size_t size;
1534 int zi;
1535 int pgno;
1537 /* Fast realloc path for big allocations */
1538 if (flags & FASTSLABREALLOC) {
1539 big = *rbigp;
1540 goto fastslabrealloc;
1544 * Handle NULL frees and special 0-byte allocations
1546 if (ptr == NULL)
1547 return;
1550 * Handle oversized allocations.
1552 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1553 while ((big = *bigp) != NULL) {
1554 if (big->base == ptr) {
1555 *bigp = big->next;
1556 atomic_add_long(&excess_alloc, big->active -
1557 big->bytes);
1558 bigalloc_unlock(ptr);
1561 * Try to stash the block we are freeing,
1562 * potentially receiving another block in
1563 * return which must be freed.
1565 fastslabrealloc:
1566 if (big->bytes <= BIGCACHE_LIMIT) {
1567 big = bigcache_find_free(big);
1568 if (big == NULL)
1569 return;
1571 ptr = big->base; /* reload */
1572 size = big->bytes;
1573 _slabfree(big, 0, NULL);
1574 _vmem_free(ptr, size);
1575 return;
1577 bigp = &big->next;
1579 bigalloc_unlock(ptr);
1580 handle_excess_big();
1584 * Zone case. Figure out the zone based on the fact that it is
1585 * ZoneSize aligned.
1587 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1588 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1590 size = z->z_ChunkSize;
1591 zi = z->z_ZoneIndex;
1593 if (g_malloc_flags & SAFLAG_ZERO)
1594 bzero(ptr, size);
1596 if (mtmagazine_free(zi, ptr) == 0)
1597 return;
1599 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1600 chunk = ptr;
1603 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1604 * z_FirstFreePg.
1606 slgd = &SLGlobalData;
1607 slgd_lock(slgd);
1609 chunk->c_Next = z->z_PageAry[pgno];
1610 z->z_PageAry[pgno] = chunk;
1611 if (z->z_FirstFreePg > pgno)
1612 z->z_FirstFreePg = pgno;
1615 * Bump the number of free chunks. If it becomes non-zero the zone
1616 * must be added back onto the appropriate list.
1618 if (z->z_NFree++ == 0) {
1619 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1620 slgd->ZoneAry[z->z_ZoneIndex] = z;
1624 * If the zone becomes totally free we get rid of it.
1626 if (z->z_NFree == z->z_NMax) {
1627 slzone_t *pz;
1629 pz = &slgd->ZoneAry[z->z_ZoneIndex];
1630 while (z != *pz)
1631 pz = &(*pz)->z_Next;
1632 *pz = z->z_Next;
1633 z->z_Magic = -1;
1634 z->z_Next = NULL;
1635 slgd_unlock(slgd);
1636 zone_free(z);
1637 } else {
1638 slgd_unlock(slgd);
1643 * Allocate an object from the given magazine. Return NULL if the
1644 * magazine is NULL or empty.
1646 static __inline void *
1647 magazine_alloc(struct magazine *mp)
1649 void *obj;
1651 if (mp && MAGAZINE_NOTEMPTY(mp)) {
1652 obj = mp->objects[--mp->rounds];
1653 } else {
1654 obj = NULL;
1656 return (obj);
1659 static __inline int
1660 magazine_free(struct magazine *mp, void *p)
1662 if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1663 mp->objects[mp->rounds++] = p;
1664 return 0;
1667 return -1;
1670 static void *
1671 mtmagazine_alloc(int zi, int flags)
1673 thr_mags *tp;
1674 struct magazine *mp, *emptymag;
1675 magazine_depot *d;
1676 void *obj;
1679 * Do not try to access per-thread magazines while the mtmagazine
1680 * is being initialized or destroyed.
1682 tp = &thread_mags;
1683 if (tp->init < 0)
1684 return(NULL);
1687 * Primary per-thread allocation loop
1689 for (;;) {
1691 * Make sure we have a magazine available for use.
1693 if (tp->newmag == NULL && (flags & SAFLAG_MAGS) == 0) {
1694 mp = _slaballoc(sizeof(struct magazine),
1695 SAFLAG_ZERO | SAFLAG_MAGS);
1696 if (mp == NULL) {
1697 obj = NULL;
1698 break;
1700 if (tp->newmag) {
1701 _slabfree(mp, 0, NULL);
1702 } else {
1703 tp->newmag = mp;
1708 * If the loaded magazine has rounds, allocate and return
1710 mp = tp->mags[zi].loaded;
1711 obj = magazine_alloc(mp);
1712 if (obj)
1713 break;
1716 * The prev magazine can only be completely empty or completely
1717 * full. If it is full, swap it with the loaded magazine
1718 * and retry.
1720 mp = tp->mags[zi].prev;
1721 if (mp && MAGAZINE_FULL(mp)) {
1722 MASSERT(mp->rounds != 0);
1723 swap_mags(&tp->mags[zi]); /* prev now empty */
1724 continue;
1728 * If the depot has no loaded magazines ensure that tp->loaded
1729 * is not NULL and return NULL. This will allow _slaballoc()
1730 * to cache referrals to SLGlobalData in a magazine.
1732 d = &depots[zi];
1733 if (SLIST_EMPTY(&d->full)) { /* UNLOCKED TEST IS SAFE */
1734 mp = tp->mags[zi].loaded;
1735 if (mp == NULL && tp->newmag) {
1736 mp = tp->newmag;
1737 tp->newmag = NULL;
1738 mp->capacity = zonecapacity(zi);
1739 mp->rounds = 0;
1740 mp->flags = 0;
1741 tp->mags[zi].loaded = mp;
1743 break;
1747 * Cycle: depot(loaded) -> loaded -> prev -> depot(empty)
1749 * If we race and the depot has no full magazines, retry.
1751 depot_lock(d);
1752 mp = SLIST_FIRST(&d->full);
1753 if (mp) {
1754 SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1755 emptymag = tp->mags[zi].prev;
1756 if (emptymag) {
1757 SLIST_INSERT_HEAD(&d->empty, emptymag,
1758 nextmagazine);
1760 tp->mags[zi].prev = tp->mags[zi].loaded;
1761 tp->mags[zi].loaded = mp;
1762 MASSERT(MAGAZINE_NOTEMPTY(mp));
1764 depot_unlock(d);
1765 continue;
1768 return (obj);
1771 static int
1772 mtmagazine_free(int zi, void *ptr)
1774 thr_mags *tp;
1775 struct magazine *mp, *loadedmag;
1776 magazine_depot *d;
1777 int rc = -1;
1780 * Do not try to access per-thread magazines while the mtmagazine
1781 * is being initialized or destroyed.
1783 tp = &thread_mags;
1784 if (tp->init < 0)
1785 return(-1);
1788 * Primary per-thread freeing loop
1790 for (;;) {
1792 * Make sure a new magazine is available in case we have
1793 * to use it. Staging the newmag allows us to avoid
1794 * some locking/reentrancy complexity.
1796 * Temporarily disable the per-thread caches for this
1797 * allocation to avoid reentrancy and/or to avoid a
1798 * stack overflow if the [zi] happens to be the same that
1799 * would be used to allocate the new magazine.
1801 * WARNING! Calling _slaballoc() can indirectly modify
1802 * tp->newmag.
1804 if (tp->newmag == NULL) {
1805 mp = _slaballoc(sizeof(struct magazine),
1806 SAFLAG_ZERO | SAFLAG_MAGS);
1807 if (tp->newmag && mp)
1808 _slabfree(mp, 0, NULL);
1809 else
1810 tp->newmag = mp;
1811 if (tp->newmag == NULL) {
1812 rc = -1;
1813 break;
1818 * If the loaded magazine has space, free directly to it
1820 rc = magazine_free(tp->mags[zi].loaded, ptr);
1821 if (rc == 0)
1822 break;
1825 * The prev magazine can only be completely empty or completely
1826 * full. If it is empty, swap it with the loaded magazine
1827 * and retry.
1829 mp = tp->mags[zi].prev;
1830 if (mp && MAGAZINE_EMPTY(mp)) {
1831 MASSERT(mp->rounds == 0);
1832 swap_mags(&tp->mags[zi]); /* prev now full */
1833 continue;
1837 * Try to get an empty magazine from the depot. Cycle
1838 * through depot(empty)->loaded->prev->depot(full).
1839 * Retry if an empty magazine was available from the depot.
1841 d = &depots[zi];
1842 depot_lock(d);
1844 if ((loadedmag = tp->mags[zi].prev) != NULL)
1845 SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
1846 tp->mags[zi].prev = tp->mags[zi].loaded;
1847 mp = SLIST_FIRST(&d->empty);
1848 if (mp) {
1849 tp->mags[zi].loaded = mp;
1850 SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1851 depot_unlock(d);
1852 MASSERT(MAGAZINE_NOTFULL(mp));
1853 } else {
1854 mp = tp->newmag;
1855 tp->newmag = NULL;
1856 mp->capacity = zonecapacity(zi);
1857 mp->rounds = 0;
1858 mp->flags = 0;
1859 tp->mags[zi].loaded = mp;
1860 depot_unlock(d);
1864 return rc;
1867 static void
1868 mtmagazine_init(void)
1870 int error;
1872 error = _pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1873 if (error)
1874 abort();
1878 * This function is only used by the thread exit destructor
1880 static void
1881 mtmagazine_drain(struct magazine *mp)
1883 void *obj;
1885 nmalloc_sigblockall();
1886 while (MAGAZINE_NOTEMPTY(mp)) {
1887 obj = magazine_alloc(mp);
1888 _slabfree(obj, 0, NULL);
1890 nmalloc_sigunblockall();
1894 * mtmagazine_destructor()
1896 * When a thread exits, we reclaim all its resources; all its magazines are
1897 * drained and the structures are freed.
1899 * WARNING! The destructor can be called multiple times if the larger user
1900 * program has its own destructors which run after ours which
1901 * allocate or free memory.
1903 static void
1904 mtmagazine_destructor(void *thrp)
1906 thr_mags *tp = thrp;
1907 struct magazine *mp;
1908 int i;
1910 if (__isexiting)
1911 return;
1914 * Prevent further use of mtmagazines while we are destructing
1915 * them, as well as for any destructors which are run after us
1916 * prior to the thread actually being destroyed.
1918 tp->init = -1;
1920 nmalloc_sigblockall();
1921 for (i = 0; i < NZONES; i++) {
1922 mp = tp->mags[i].loaded;
1923 tp->mags[i].loaded = NULL;
1924 if (mp) {
1925 if (MAGAZINE_NOTEMPTY(mp))
1926 mtmagazine_drain(mp);
1927 _slabfree(mp, 0, NULL);
1930 mp = tp->mags[i].prev;
1931 tp->mags[i].prev = NULL;
1932 if (mp) {
1933 if (MAGAZINE_NOTEMPTY(mp))
1934 mtmagazine_drain(mp);
1935 _slabfree(mp, 0, NULL);
1938 if (tp->newmag) {
1939 mp = tp->newmag;
1940 tp->newmag = NULL;
1941 _slabfree(mp, 0, NULL);
1943 nmalloc_sigunblockall();
1947 * zone_alloc()
1949 * Attempt to allocate a zone from the zone magazine.
1951 static slzone_t
1952 zone_alloc(int flags)
1954 slzone_t z;
1956 zone_magazine_lock();
1958 z = magazine_alloc(&zone_magazine);
1959 if (z == NULL) {
1960 zone_magazine_unlock();
1961 z = _vmem_alloc(ZoneSize, ZoneSize, flags);
1962 } else {
1963 z->z_Flags |= SLZF_UNOTZEROD;
1964 zone_magazine_unlock();
1966 return z;
1970 * Free a zone.
1972 static void
1973 zone_free(void *z)
1975 void *excess[M_ZONE_HYSTERESIS];
1976 int i;
1978 zone_magazine_lock();
1980 bzero(z, sizeof(struct slzone));
1982 if (opt_madvise)
1983 madvise(z, ZoneSize, MADV_FREE);
1985 i = magazine_free(&zone_magazine, z);
1988 * If we failed to free (the zone magazine is full), pull some excess
1989 * zones back out of the magazine, release the zone magazine lock, and
1990 * then free them and the zone to the system via _vmem_free().
1992 if (i == -1) {
1993 for (i = 0; i < M_ZONE_HYSTERESIS; ++i) {
1994 excess[i] = magazine_alloc(&zone_magazine);
1995 MASSERT_WTHUNLK(excess[i] != NULL,
1996 zone_magazine_unlock());
1998 zone_magazine_unlock();
2000 for (i = 0; i < M_ZONE_HYSTERESIS; ++i)
2001 _vmem_free(excess[i], ZoneSize);
2002 _vmem_free(z, ZoneSize);
2003 } else {
2004 zone_magazine_unlock();
2009 * _vmem_alloc()
2011 * Directly map memory in PAGE_SIZE'd chunks with the specified
2012 * alignment.
2014 * Alignment must be a multiple of PAGE_SIZE.
2016 * Size must be >= alignment.
2018 static void *
2019 _vmem_alloc(size_t size, size_t align, int flags)
2021 static char *addr_hint;
2022 static int reset_hint = 16;
2023 char *addr;
2024 char *save;
2026 if (--reset_hint <= 0) {
2027 addr_hint = NULL;
2028 reset_hint = 16;
2032 * Map anonymous private memory.
2034 save = mmap(addr_hint, size, PROT_READ|PROT_WRITE,
2035 MAP_PRIVATE|MAP_ANON, -1, 0);
2036 if (save == MAP_FAILED)
2037 goto worst_case;
2038 if (((uintptr_t)save & (align - 1)) == 0)
2039 return((void *)save);
2041 addr_hint = (char *)(((size_t)save + (align - 1)) & ~(align - 1));
2042 munmap(save, size);
2044 save = mmap(addr_hint, size, PROT_READ|PROT_WRITE,
2045 MAP_PRIVATE|MAP_ANON, -1, 0);
2046 if (save == MAP_FAILED)
2047 goto worst_case;
2048 if (((size_t)save & (align - 1)) == 0)
2049 return((void *)save);
2050 munmap(save, size);
2052 worst_case:
2053 save = mmap(NULL, size + align, PROT_READ|PROT_WRITE,
2054 MAP_PRIVATE|MAP_ANON, -1, 0);
2055 if (save == MAP_FAILED)
2056 return NULL;
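	/*
	 * Round the mapping up to the requested alignment and unmap the
	 * unused head and tail of the oversized mapping.
	 */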
2058 addr = (char *)(((size_t)save + (align - 1)) & ~(align - 1));
2059 if (save != addr)
2060 munmap(save, addr - save);
2061 if (addr + size != save + size + align)
2062 munmap(addr + size, save + align - addr);
2064 addr_hint = addr + size;
2066 return ((void *)addr);
2070 * _vmem_free()
2072 * Free a chunk of memory allocated with _vmem_alloc()
2074 static void
2075 _vmem_free(void *ptr, size_t size)
2077 munmap(ptr, size);
2081 * Panic on fatal conditions
2083 static void
2084 _mpanic(const char *ctl, ...)
2086 va_list va;
2088 if (malloc_panic == 0) {
2089 malloc_panic = 1;
2090 va_start(va, ctl);
2091 vfprintf(stderr, ctl, va);
2092 fprintf(stderr, "\n");
2093 fflush(stderr);
2094 va_end(va);
2096 abort();
2099 __weak_reference(__aligned_alloc, aligned_alloc);
2100 __weak_reference(__malloc, malloc);
2101 __weak_reference(__calloc, calloc);
2102 __weak_reference(__posix_memalign, posix_memalign);
2103 __weak_reference(__realloc, realloc);
2104 __weak_reference(__free, free);