sys/kern/kern_slaballoc.c
1 /*
2 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
4 * Copyright (c) 2003,2004,2010-2019 The DragonFly Project.
5 * All rights reserved.
7 * This code is derived from software contributed to The DragonFly Project
8 * by Matthew Dillon <dillon@backplane.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
19 * distribution.
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
37 * This module implements a slab allocator drop-in replacement for the
38 * kernel malloc().
40 * A slab allocator reserves a ZONE for each chunk size, then lays the
41 * chunks out in an array within the zone. Allocation and deallocation
42 * is nearly instantaneous, and fragmentation/overhead losses are limited
43 * to a fixed worst-case amount.
45 * The downside of this slab implementation is in the chunk size
46 * multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu.
47 * In a kernel implementation all this memory will be physical so
48 * the zone size is adjusted downward on machines with less physical
49 * memory. The upside is that overhead is bounded... this is the *worst*
50 * case overhead.
52 * Slab management is done on a per-cpu basis and no locking or mutexes
53 * are required, only a critical section. When one cpu frees memory
54 * belonging to another cpu's slab manager an asynchronous IPI message
55 * will be queued to execute the operation. In addition, both the
56 * high level slab allocator and the low level zone allocator optimize
57 * M_ZERO requests, and the slab allocator does not have to pre-initialize
58 * the linked list of chunks.
60 * XXX Balancing is needed between cpus. Balance will be handled through
61 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
63 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
64 * the new zone should be restricted to M_USE_RESERVE requests only.
66 * Alloc Size Chunking Number of zones
67 * 0-127 8 16
68 * 128-255 16 8
69 * 256-511 32 8
70 * 512-1023 64 8
71 * 1024-2047 128 8
72 * 2048-4095 256 8
73 * 4096-8191 512 8
74 * 8192-16383 1024 8
75 * 16384-32767 2048 8
76 * (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
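 * As a worked example, kmalloc(100, ...) falls in the 0-127 row: the
 * request is rounded up to the next 8-byte boundary (104 bytes) and is
 * served from zone index 12 (see zoneindex() below:
 * (100 + 7) & ~7 = 104, 104 / 8 - 1 = 12).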
78 * Allocations >= ZoneLimit go directly to kmem.
79 * (n * PAGE_SIZE, n > 2) allocations go directly to kmem.
81 * Alignment properties:
82 * - All power-of-2 sized allocations are power-of-2 aligned.
83 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
84 * power-of-2 round up of 'size'.
85 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
86 * above table 'Chunking' column).
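 * For example, kmalloc(256, ...) returns 256-byte aligned memory (a
 * power-of-2 size), kmalloc(192, ...) is only guaranteed 16-byte
 * alignment (192 falls in the 16-byte chunking row), and a 192 byte
 * request made with M_POWEROF2 is rounded up to 256 and returned
 * 256-byte aligned.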
88 * API REQUIREMENTS AND SIDE EFFECTS
90 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
91 * have remained compatible with the following API requirements:
93 * + malloc(0) is allowed and returns non-NULL (ahc driver)
94 * + ability to allocate arbitrarily large chunks of memory
97 #include "opt_vm.h"
99 #include <sys/param.h>
100 #include <sys/systm.h>
101 #include <sys/kernel.h>
102 #include <sys/slaballoc.h>
103 #include <sys/mbuf.h>
104 #include <sys/vmmeter.h>
105 #include <sys/lock.h>
106 #include <sys/thread.h>
107 #include <sys/globaldata.h>
108 #include <sys/sysctl.h>
109 #include <sys/ktr.h>
110 #include <sys/malloc.h>
112 #include <vm/vm.h>
113 #include <vm/vm_param.h>
114 #include <vm/vm_kern.h>
115 #include <vm/vm_extern.h>
116 #include <vm/vm_object.h>
117 #include <vm/pmap.h>
118 #include <vm/vm_map.h>
119 #include <vm/vm_page.h>
120 #include <vm/vm_pageout.h>
122 #include <machine/cpu.h>
124 #include <sys/thread2.h>
125 #include <vm/vm_page2.h>
127 #if (__VM_CACHELINE_SIZE == 32)
128 #define CAN_CACHEALIGN(sz) ((sz) >= 256)
129 #elif (__VM_CACHELINE_SIZE == 64)
130 #define CAN_CACHEALIGN(sz) ((sz) >= 512)
131 #elif (__VM_CACHELINE_SIZE == 128)
132 #define CAN_CACHEALIGN(sz) ((sz) >= 1024)
133 #else
134 #error "unsupported cacheline size"
135 #endif
137 #define btokup(z) (&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)
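 * btokup() returns a pointer to the per-kva-page ku_pagecnt slot backing
 * the given address.  As used in this file the stored value encodes:
 * > 0  start of an oversized (direct kmem) allocation, in pages
 * < 0  page belongs to a slab zone owned by cpu (-value - 1)
 *   0  no active allocation (zone released or kva about to be freed)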
139 #define MEMORY_STRING "ptr=%p type=%p size=%lu flags=%04x"
140 #define MEMORY_ARGS void *ptr, void *type, unsigned long size, int flags
142 #if !defined(KTR_MEMORY)
143 #define KTR_MEMORY KTR_ALL
144 #endif
145 KTR_INFO_MASTER(memory);
146 KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
147 KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
148 KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
149 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
150 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
151 KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
152 KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
153 KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
154 KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
155 KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
156 KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");
158 #define logmemory(name, ptr, type, size, flags) \
159 KTR_LOG(memory_ ## name, ptr, type, size, flags)
160 #define logmemory_quick(name) \
161 KTR_LOG(memory_ ## name)
164 * Fixed globals (not per-cpu)
166 __read_frequently static int ZoneSize;
167 __read_frequently static int ZoneLimit;
168 __read_frequently static int ZonePageCount;
169 __read_frequently static uintptr_t ZoneMask;
170 __read_frequently struct malloc_type *kmemstatistics; /* exported to vmstat */
172 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
173 static void kmem_slab_free(void *ptr, vm_size_t bytes);
175 #if defined(INVARIANTS)
176 static void chunk_mark_allocated(SLZone *z, void *chunk);
177 static void chunk_mark_free(SLZone *z, void *chunk);
178 #else
179 #define chunk_mark_allocated(z, chunk)
180 #define chunk_mark_free(z, chunk)
181 #endif
184 * Misc constants. Note that allocations that are exact multiples of
185 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
187 #define ZONE_RELS_THRESH 32 /* threshold number of zones */
189 #ifdef INVARIANTS
191 * The WEIRD_ADDR is used as known text to copy into free objects to
192 * try to create deterministic failure cases if the data is accessed after
193 * free.
195 #define WEIRD_ADDR 0xdeadc0de
196 #endif
197 #define ZERO_LENGTH_PTR ((void *)-8)
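 * ZERO_LENGTH_PTR is the non-NULL token handed back for size-0 requests;
 * kmalloc(0) returns it and kfree()/krealloc() check for it explicitly,
 * so the allocator never dereferences it.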
200 * Misc global malloc buckets
203 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
204 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
205 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
206 MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");
208 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
209 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
212 * Initialize the slab memory allocator. We have to choose a zone size based
213 * on available physical memory. We choose a zone size which is approximately
214 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
215 * 128K. The zone size is limited to the bounds set in slaballoc.h
216 * (typically 32K min, 128K max).
218 static void kmeminit(void *dummy);
220 char *ZeroPage;
222 SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
224 #ifdef INVARIANTS
226 * If enabled, any memory allocated without M_ZERO is initialized to -1.
228 __read_frequently static int use_malloc_pattern;
229 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
230 &use_malloc_pattern, 0,
231 "Initialize memory to -1 if M_ZERO not specified");
233 __read_frequently static int32_t weirdary[16];
234 __read_frequently static int use_weird_array;
235 SYSCTL_INT(_debug, OID_AUTO, use_weird_array, CTLFLAG_RW,
236 &use_weird_array, 0,
237 "Initialize memory to weird values on kfree()");
238 #endif
240 __read_frequently static int ZoneRelsThresh = ZONE_RELS_THRESH;
241 SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
243 static struct spinlock kmemstat_spin =
244 SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");
247 * Returns the kernel memory size limit for the purposes of initializing
248 * various subsystem caches. The smaller of available memory and the KVM
249 * memory space is returned.
251 * The size in megabytes is returned.
253 size_t
254 kmem_lim_size(void)
256 size_t limsize;
258 limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
259 if (limsize > KvaSize)
260 limsize = KvaSize;
261 return (limsize / (1024 * 1024));
264 static void
265 kmeminit(void *dummy)
267 size_t limsize;
268 int usesize;
269 #ifdef INVARIANTS
270 int i;
271 #endif
273 limsize = kmem_lim_size();
274 usesize = (int)(limsize * 1024); /* convert to KB */
277 * If the machine has a large KVM space and more than 8G of ram,
278 * double the zone release threshold to reduce SMP invalidations.
279 * If more than 16G of ram, do it again.
281 * The BIOS eats a little ram so add some slop. We want 8G worth of
282 * memory sticks to trigger the first adjustment.
284 if (ZoneRelsThresh == ZONE_RELS_THRESH) {
285 if (limsize >= 7 * 1024)
286 ZoneRelsThresh *= 2;
287 if (limsize >= 15 * 1024)
288 ZoneRelsThresh *= 2;
289 if (limsize >= 31 * 1024)
290 ZoneRelsThresh *= 2;
291 if (limsize >= 63 * 1024)
292 ZoneRelsThresh *= 2;
293 if (limsize >= 127 * 1024)
294 ZoneRelsThresh *= 2;
298 * Calculate the zone size. This typically calculates to
299 * ZALLOC_MAX_ZONE_SIZE
301 ZoneSize = ZALLOC_MIN_ZONE_SIZE;
302 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
303 ZoneSize <<= 1;
304 ZoneLimit = ZoneSize / 4;
305 if (ZoneLimit > ZALLOC_ZONE_LIMIT)
306 ZoneLimit = ZALLOC_ZONE_LIMIT;
307 ZoneMask = ~(uintptr_t)(ZoneSize - 1);
308 ZonePageCount = ZoneSize / PAGE_SIZE;
310 #ifdef INVARIANTS
311 for (i = 0; i < NELEM(weirdary); ++i)
312 weirdary[i] = WEIRD_ADDR;
313 #endif
315 ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);
317 if (bootverbose)
318 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
322 * (low level) Initialize slab-related elements in the globaldata structure.
324 * Occurs after kmeminit().
326 void
327 slab_gdinit(globaldata_t gd)
329 SLGlobalData *slgd;
330 int i;
332 slgd = &gd->gd_slab;
333 for (i = 0; i < NZONES; ++i)
334 TAILQ_INIT(&slgd->ZoneAry[i]);
335 TAILQ_INIT(&slgd->FreeZones);
336 TAILQ_INIT(&slgd->FreeOvZones);
340 * Initialize a malloc type tracking structure.
342 void
343 malloc_init(void *data)
345 struct malloc_type *type = data;
346 struct kmalloc_use *use;
347 size_t limsize;
349 if (type->ks_magic != M_MAGIC)
350 panic("malloc type lacks magic");
352 if (type->ks_limit != 0)
353 return;
355 if (vmstats.v_page_count == 0)
356 panic("malloc_init not allowed before vm init");
358 limsize = kmem_lim_size() * (1024 * 1024);
359 type->ks_limit = limsize / 10;
361 if (ncpus == 1)
362 use = &type->ks_use0;
363 else
364 use = kmalloc(ncpus * sizeof(*use), M_TEMP, M_WAITOK | M_ZERO);
366 spin_lock(&kmemstat_spin);
367 type->ks_next = kmemstatistics;
368 type->ks_use = use;
369 kmemstatistics = type;
370 spin_unlock(&kmemstat_spin);
373 void
374 malloc_uninit(void *data)
376 struct malloc_type *type = data;
377 struct malloc_type *t;
378 #ifdef INVARIANTS
379 int i;
380 long ttl;
381 #endif
383 if (type->ks_magic != M_MAGIC)
384 panic("malloc type lacks magic");
386 if (vmstats.v_page_count == 0)
387 panic("malloc_uninit not allowed before vm init");
389 if (type->ks_limit == 0)
390 panic("malloc_uninit on uninitialized type");
392 /* Make sure that all pending kfree()s are finished. */
393 lwkt_synchronize_ipiqs("muninit");
395 #ifdef INVARIANTS
397 * memuse is only correct in aggregation. Due to memory being allocated
398 * on one cpu and freed on another, individual array entries may be
399 * negative or positive (canceling each other out).
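 * For example, a block allocated on cpu 0 and later freed on cpu 1 leaves
 * memuse[0] positive and memuse[1] negative by the same amount; only the
 * sum across all cpus is meaningful.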
401 for (i = ttl = 0; i < ncpus; ++i)
402 ttl += type->ks_use[i].memuse;
403 if (ttl) {
404 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
405 ttl, type->ks_shortdesc, i);
407 #endif
408 spin_lock(&kmemstat_spin);
409 if (type == kmemstatistics) {
410 kmemstatistics = type->ks_next;
411 } else {
412 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
413 if (t->ks_next == type) {
414 t->ks_next = type->ks_next;
415 break;
419 type->ks_next = NULL;
420 type->ks_limit = 0;
421 spin_unlock(&kmemstat_spin);
423 if (type->ks_use != &type->ks_use0) {
424 kfree(type->ks_use, M_TEMP);
425 type->ks_use = NULL;
430 * Reinitialize all installed malloc regions after ncpus has been
431 * determined. type->ks_use is initially set to &type->ks_use0,
432 * this function will dynamically allocate it as appropriate for ncpus.
434 void
435 malloc_reinit_ncpus(void)
437 struct malloc_type *t;
438 struct kmalloc_use *use;
441 * If only one cpu we can leave ks_use set to ks_use0
443 if (ncpus <= 1)
444 return;
447 * Expand ks_use for all kmalloc blocks
449 for (t = kmemstatistics; t; t = t->ks_next) {
450 KKASSERT(t->ks_use == &t->ks_use0);
451 t->ks_use = kmalloc(sizeof(*use) * ncpus, M_TEMP, M_WAITOK|M_ZERO);
452 t->ks_use[0] = t->ks_use0;
457 * Increase the kmalloc pool limit for the specified pool. No changes
458 * are made if the pool would shrink.
460 void
461 kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
463 KKASSERT(type->ks_limit != 0);
464 if (bytes == 0)
465 bytes = KvaSize;
466 if (type->ks_limit < bytes)
467 type->ks_limit = bytes;
470 void
471 kmalloc_set_unlimited(struct malloc_type *type)
473 type->ks_limit = kmem_lim_size() * (1024 * 1024);
477 * Dynamically create a malloc pool. This function is a NOP if *typep is
478 * already non-NULL.
480 void
481 kmalloc_create(struct malloc_type **typep, const char *descr)
483 struct malloc_type *type;
485 if (*typep == NULL) {
486 type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
487 type->ks_magic = M_MAGIC;
488 type->ks_shortdesc = descr;
489 malloc_init(type);
490 *typep = type;
495 * Destroy a dynamically created malloc pool. This function is a NOP if
496 * the pool has already been destroyed.
498 void
499 kmalloc_destroy(struct malloc_type **typep)
501 if (*typep != NULL) {
502 malloc_uninit(*typep);
503 kfree(*typep, M_TEMP);
504 *typep = NULL;
509 * Calculate the zone index for the allocation request size and set the
510 * allocation request size to that particular zone's chunk size.
512 static __inline int
513 zoneindex(unsigned long *bytes, unsigned long *align)
515 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */
517 if (n < 128) {
518 *bytes = n = (n + 7) & ~7;
519 *align = 8;
520 return(n / 8 - 1); /* 8 byte chunks, 16 zones */
522 if (n < 256) {
523 *bytes = n = (n + 15) & ~15;
524 *align = 16;
525 return(n / 16 + 7);
527 if (n < 8192) {
528 if (n < 512) {
529 *bytes = n = (n + 31) & ~31;
530 *align = 32;
531 return(n / 32 + 15);
533 if (n < 1024) {
534 *bytes = n = (n + 63) & ~63;
535 *align = 64;
536 return(n / 64 + 23);
538 if (n < 2048) {
539 *bytes = n = (n + 127) & ~127;
540 *align = 128;
541 return(n / 128 + 31);
543 if (n < 4096) {
544 *bytes = n = (n + 255) & ~255;
545 *align = 256;
546 return(n / 256 + 39);
548 *bytes = n = (n + 511) & ~511;
549 *align = 512;
550 return(n / 512 + 47);
552 #if ZALLOC_ZONE_LIMIT > 8192
553 if (n < 16384) {
554 *bytes = n = (n + 1023) & ~1023;
555 *align = 1024;
556 return(n / 1024 + 55);
558 #endif
559 #if ZALLOC_ZONE_LIMIT > 16384
560 if (n < 32768) {
561 *bytes = n = (n + 2047) & ~2047;
562 *align = 2048;
563 return(n / 2048 + 63);
565 #endif
566 panic("Unexpected byte count %d", n);
567 return(0);
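 * Transfer chunks freed by remote cpus to the local free list: atomically
 * detach the z_RChunks list (retrying the cmpset if a remote kfree() races
 * us), then append the detached chain to z_LChunks, marking each chunk
 * free and bumping z_NFree.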
570 static __inline void
571 clean_zone_rchunks(SLZone *z)
573 SLChunk *bchunk;
575 while ((bchunk = z->z_RChunks) != NULL) {
576 cpu_ccfence();
577 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
578 *z->z_LChunksp = bchunk;
579 while (bchunk) {
580 chunk_mark_free(z, bchunk);
581 z->z_LChunksp = &bchunk->c_Next;
582 bchunk = bchunk->c_Next;
583 ++z->z_NFree;
585 break;
587 /* retry */
592 * If the zone becomes totally free and is not the only zone listed for a
593 * chunk size, we move it to the FreeZones list. We always leave at least
594 * one zone per chunk size listed, even if it is freeable.
596 * Do not move the zone if there is an IPI in_flight (z_RCount != 0),
597 * otherwise MP races can result in our free_remote code accessing a
598 * destroyed zone. The remote end interlocks z_RCount with z_RChunks
599 * so one has to test both z_NFree and z_RCount.
601 * Since this code can be called from an IPI callback, do *NOT* try to mess
602 * with kernel_map here. Hysteresis will be performed at kmalloc() time.
604 static __inline SLZone *
605 check_zone_free(SLGlobalData *slgd, SLZone *z)
607 SLZone *znext;
609 znext = TAILQ_NEXT(z, z_Entry);
610 if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
611 (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)) {
612 int *kup;
614 TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
616 z->z_Magic = -1;
617 TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
618 ++slgd->NFreeZones;
619 kup = btokup(z);
620 *kup = 0;
622 return znext;
625 #ifdef SLAB_DEBUG
627 * Used to debug memory corruption issues. Record up to (typically 32)
628 * allocation sources for this zone (for a particular chunk size).
631 static void
632 slab_record_source(SLZone *z, const char *file, int line)
634 int i;
635 int b = line & (SLAB_DEBUG_ENTRIES - 1);
637 i = b;
638 do {
639 if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
640 return;
641 if (z->z_Sources[i].file == NULL)
642 break;
643 i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
644 } while (i != b);
645 z->z_Sources[i].file = file;
646 z->z_Sources[i].line = line;
649 #endif
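 * Round a size up to the next power of 2.  Sizes that are already a power
 * of 2, and 0, are returned unchanged.  For example powerof2_size(100)
 * computes flsl(100) == 7 and returns 1UL << 7 == 128, while
 * powerof2_size(128) stays 128.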
651 static __inline unsigned long
652 powerof2_size(unsigned long size)
654 int i;
656 if (size == 0 || powerof2(size))
657 return size;
659 i = flsl(size);
660 return (1UL << i);
664 * kmalloc() (SLAB ALLOCATOR)
666 * Allocate memory via the slab allocator. If the request is too large,
667 * or if it is page-aligned beyond a certain size, we fall back to the
668 * KMEM subsystem. A SLAB tracking descriptor must be specified, use
669 * &SlabMisc if you don't care.
671 * M_RNOWAIT - don't block.
672 * M_NULLOK - return NULL instead of blocking.
673 * M_ZERO - zero the returned memory.
674 * M_USE_RESERVE - allow greater drawdown of the free list
675 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
676 * M_POWEROF2 - roundup size to the nearest power of 2
678 * MPSAFE
681 /* don't let kmalloc macro mess up function declaration */
682 #undef kmalloc
684 #ifdef SLAB_DEBUG
685 void *
686 kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
687 const char *file, int line)
688 #else
689 void *
690 kmalloc(unsigned long size, struct malloc_type *type, int flags)
691 #endif
693 SLZone *z;
694 SLChunk *chunk;
695 SLGlobalData *slgd;
696 struct globaldata *gd;
697 unsigned long align;
698 int zi;
699 #ifdef INVARIANTS
700 int i;
701 #endif
703 logmemory_quick(malloc_beg);
704 gd = mycpu;
705 slgd = &gd->gd_slab;
708 * XXX silly to have this in the critical path.
710 KKASSERT(type->ks_limit != 0);
711 ++type->ks_use[gd->gd_cpuid].calls;
714 * Flagged for cache-alignment
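 * Requests smaller than a cache line are padded to one full line.  Sizes
 * whose zone chunking cannot guarantee cache alignment (CAN_CACHEALIGN()
 * false) are converted to M_POWEROF2 so the power-of-2 alignment rule
 * supplies it instead; e.g. with 64-byte lines a 300 byte request becomes
 * a 512 byte, 512-aligned allocation.  Larger sizes already land in zones
 * whose chunk size is a multiple of the cache line size.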
716 if (flags & M_CACHEALIGN) {
717 if (size < __VM_CACHELINE_SIZE)
718 size = __VM_CACHELINE_SIZE;
719 else if (!CAN_CACHEALIGN(size))
720 flags |= M_POWEROF2;
724 * Flagged to force nearest power-of-2 (higher or same)
726 if (flags & M_POWEROF2)
727 size = powerof2_size(size);
730 * Handle the case where the limit is reached. Panic if we can't return
731 * NULL. The original malloc code looped, but this tended to
732 * simply deadlock the computer.
734 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
735 * to determine if a more complete limit check should be done. The
736 * actual memory use is tracked via ks_use[cpu].memuse.
738 while (type->ks_loosememuse >= type->ks_limit) {
739 int i;
740 long ttl;
742 for (i = ttl = 0; i < ncpus; ++i)
743 ttl += type->ks_use[i].memuse;
744 type->ks_loosememuse = ttl; /* not MP synchronized */
745 if ((ssize_t)ttl < 0) /* deal with occasional race */
746 ttl = 0;
747 if (ttl >= type->ks_limit) {
748 if (flags & M_NULLOK) {
749 logmemory(malloc_end, NULL, type, size, flags);
750 return(NULL);
752 panic("%s: malloc limit exceeded", type->ks_shortdesc);
757 * Handle the degenerate size == 0 case. Yes, this does happen.
758 * Return a special pointer. This is to maintain compatibility with
759 * the original malloc implementation. Certain devices, such as the
760 * adaptec driver, not only allocate 0 bytes, they check for NULL and
761 * also realloc() later on. Joy.
763 if (size == 0) {
764 logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
765 return(ZERO_LENGTH_PTR);
769 * Handle hysteresis from prior frees here in malloc(). We cannot
770 * safely manipulate the kernel_map in free() due to free() possibly
771 * being called via an IPI message or from sensitive interrupt code.
773 * NOTE: ku_pagecnt must be cleared before we free the slab or we
774 * might race another cpu allocating the kva and setting
775 * ku_pagecnt.
777 while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
778 crit_enter();
779 if (slgd->NFreeZones > ZoneRelsThresh) { /* crit sect race */
780 int *kup;
782 z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
783 KKASSERT(z != NULL);
784 TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
785 --slgd->NFreeZones;
786 kup = btokup(z);
787 *kup = 0;
788 kmem_slab_free(z, ZoneSize); /* may block */
790 crit_exit();
794 * XXX handle oversized frees that were queued from kfree().
796 while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
797 crit_enter();
798 if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
799 vm_size_t tsize;
801 KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
802 TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
803 tsize = z->z_ChunkSize;
804 kmem_slab_free(z, tsize); /* may block */
806 crit_exit();
810 * Handle large allocations directly. There should not be very many of
811 * these so performance is not a big issue.
813 * The backend allocator is pretty nasty on a SMP system. Use the
814 * slab allocator for one and two page-sized chunks even though we lose
815 * some efficiency. XXX maybe fix mmio and the elf loader instead.
817 if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
818 int *kup;
820 size = round_page(size);
821 chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
822 if (chunk == NULL) {
823 logmemory(malloc_end, NULL, type, size, flags);
824 return(NULL);
826 flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */
827 flags |= M_PASSIVE_ZERO;
828 kup = btokup(chunk);
829 *kup = size / PAGE_SIZE;
830 crit_enter();
831 goto done;
835 * Attempt to allocate out of an existing zone. First try the free list,
836 * then allocate out of unallocated space. If we find a good zone move
837 * it to the head of the list so later allocations find it quickly
838 * (we might have thousands of zones in the list).
840 * Note: zoneindex() will panic if size is too large.
842 zi = zoneindex(&size, &align);
843 KKASSERT(zi < NZONES);
844 crit_enter();
846 if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) {
848 * Locate a chunk - we have to have at least one. If this is the
849 * last chunk go ahead and do the work to retrieve chunks freed
850 * from remote cpus, and if the zone is still empty move it off
851 * the ZoneAry.
853 if (--z->z_NFree <= 0) {
854 KKASSERT(z->z_NFree == 0);
857 * WARNING! This code competes with other cpus. It is ok
858 * for us to not drain RChunks here but we might as well, and
859 * it is ok if more accumulate after we're done.
861 * Set RSignal before pulling rchunks off, indicating that we
862 * will be moving ourselves off of the ZoneAry. Remote ends will
863 * read RSignal before putting rchunks on thus interlocking
864 * their IPI signaling.
866 if (z->z_RChunks == NULL)
867 atomic_swap_int(&z->z_RSignal, 1);
869 clean_zone_rchunks(z);
872 * Remove from the zone list if no free chunks remain.
873 * Clear RSignal
875 if (z->z_NFree == 0) {
876 TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry);
877 } else {
878 z->z_RSignal = 0;
883 * Fast path, we have chunks available in z_LChunks.
885 chunk = z->z_LChunks;
886 if (chunk) {
887 chunk_mark_allocated(z, chunk);
888 z->z_LChunks = chunk->c_Next;
889 if (z->z_LChunks == NULL)
890 z->z_LChunksp = &z->z_LChunks;
891 #ifdef SLAB_DEBUG
892 slab_record_source(z, file, line);
893 #endif
894 goto done;
898 * No chunks are available in LChunks, the free chunk MUST be
899 * in the never-before-used memory area, controlled by UIndex.
901 * The consequences are very serious if our zone got corrupted so
902 * we use an explicit panic rather than a KASSERT.
904 if (z->z_UIndex + 1 != z->z_NMax)
905 ++z->z_UIndex;
906 else
907 z->z_UIndex = 0;
909 if (z->z_UIndex == z->z_UEndIndex)
910 panic("slaballoc: corrupted zone");
912 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
913 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
914 flags &= ~M_ZERO;
915 flags |= M_PASSIVE_ZERO;
917 chunk_mark_allocated(z, chunk);
918 #ifdef SLAB_DEBUG
919 slab_record_source(z, file, line);
920 #endif
921 goto done;
925 * If all zones are exhausted we need to allocate a new zone for this
926 * index. Use M_ZERO to take advantage of pre-zeroed pages. Also see
927 * UAlloc use above in regards to M_ZERO. Note that when we are reusing
928 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
929 * we do not pre-zero it because we do not want to mess up the L1 cache.
931 * At least one subsystem, the tty code (see CROUND) expects power-of-2
932 * allocations to be power-of-2 aligned. We maintain compatibility by
933 * adjusting the base offset below.
936 int off;
937 int *kup;
939 if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) {
940 TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
941 --slgd->NFreeZones;
942 bzero(z, sizeof(SLZone));
943 z->z_Flags |= SLZF_UNOTZEROD;
944 } else {
945 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
946 if (z == NULL)
947 goto fail;
951 * How big is the base structure?
953 #if defined(INVARIANTS)
955 * Make room for z_Bitmap. An exact calculation is somewhat more
956 * complicated, so we over-estimate instead of computing it exactly.
958 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
959 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
960 #else
961 off = sizeof(SLZone);
962 #endif
965 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
966 * Otherwise properly align the data according to the chunk size.
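 * For example, in a zone serving 256 byte chunks the SLZone header size
 * 'off' is rounded up to a 256 byte boundary, so z_BasePtr starts
 * 256-aligned within the ZoneSize-aligned zone and every chunk at
 * z_BasePtr + n * 256 is itself 256-byte aligned.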
968 if (powerof2(size))
969 align = size;
970 off = roundup2(off, align);
972 z->z_Magic = ZALLOC_SLAB_MAGIC;
973 z->z_ZoneIndex = zi;
974 z->z_NMax = (ZoneSize - off) / size;
975 z->z_NFree = z->z_NMax - 1;
976 z->z_BasePtr = (char *)z + off;
977 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
978 z->z_ChunkSize = size;
979 z->z_CpuGd = gd;
980 z->z_Cpu = gd->gd_cpuid;
981 z->z_LChunksp = &z->z_LChunks;
982 #ifdef SLAB_DEBUG
983 bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
984 bzero(z->z_Sources, sizeof(z->z_Sources));
985 #endif
986 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
987 TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
988 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
989 flags &= ~M_ZERO; /* already zero'd */
990 flags |= M_PASSIVE_ZERO;
992 kup = btokup(z);
993 *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */
994 chunk_mark_allocated(z, chunk);
995 #ifdef SLAB_DEBUG
996 slab_record_source(z, file, line);
997 #endif
1000 * Slide the base index for initial allocations out of the next
1001 * zone we create so we do not over-weight the lower part of the
1002 * cpu memory caches.
1004 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
1005 & (ZALLOC_MAX_ZONE_SIZE - 1);
1008 done:
1009 ++type->ks_use[gd->gd_cpuid].inuse;
1010 type->ks_use[gd->gd_cpuid].memuse += size;
1011 type->ks_use[gd->gd_cpuid].loosememuse += size;
1012 if (type->ks_use[gd->gd_cpuid].loosememuse >= ZoneSize) {
1013 /* not MP synchronized */
1014 type->ks_loosememuse += type->ks_use[gd->gd_cpuid].loosememuse;
1015 type->ks_use[gd->gd_cpuid].loosememuse = 0;
1017 crit_exit();
1019 if (flags & M_ZERO)
1020 bzero(chunk, size);
1021 #ifdef INVARIANTS
1022 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
1023 if (use_malloc_pattern) {
1024 for (i = 0; i < size; i += sizeof(int)) {
1025 *(int *)((char *)chunk + i) = -1;
1028 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
1030 #endif
1031 logmemory(malloc_end, chunk, type, size, flags);
1032 return(chunk);
1033 fail:
1034 crit_exit();
1035 logmemory(malloc_end, NULL, type, size, flags);
1036 return(NULL);
1040 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE)
1042 * Generally speaking this routine is not called very often and we do
1043 * not attempt to optimize it beyond reusing the same pointer if the
1044 * new size fits within the chunking of the old pointer's zone.
1046 #ifdef SLAB_DEBUG
1047 void *
1048 krealloc_debug(void *ptr, unsigned long size,
1049 struct malloc_type *type, int flags,
1050 const char *file, int line)
1051 #else
1052 void *
1053 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
1054 #endif
1056 unsigned long osize;
1057 unsigned long align;
1058 SLZone *z;
1059 void *nptr;
1060 int *kup;
1062 KKASSERT((flags & M_ZERO) == 0); /* not supported */
1064 if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
1065 return(kmalloc_debug(size, type, flags, file, line));
1066 if (size == 0) {
1067 kfree(ptr, type);
1068 return(NULL);
1072 * Handle oversized allocations. XXX we really should require that a
1073 * size be passed to free() instead of this nonsense.
1075 kup = btokup(ptr);
1076 if (*kup > 0) {
1077 osize = *kup << PAGE_SHIFT;
1078 if (osize == round_page(size))
1079 return(ptr);
1080 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
1081 return(NULL);
1082 bcopy(ptr, nptr, min(size, osize));
1083 kfree(ptr, type);
1084 return(nptr);
1088 * Get the original allocation's zone. If the new request winds up
1089 * using the same chunk size we do not have to do anything.
1091 z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1092 kup = btokup(z);
1093 KKASSERT(*kup < 0);
1094 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1097 * Allocate memory for the new request size. Note that zoneindex has
1098 * already adjusted the request size to the appropriate chunk size, which
1099 * should optimize our bcopy(). Then copy and return the new pointer.
1101 * Resizing a non-power-of-2 allocation to a power-of-2 size does not
1102 * necessarily align the result.
1104 * We can only zoneindex (to align size to the chunk size) if the new
1105 * size is not too large.
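 * For example, a pointer served from the 104 byte chunk zone can be
 * krealloc()d to any size from 97 to 104 bytes and the same pointer is
 * returned; anything that rounds to a different chunk size gets a new
 * allocation, a bcopy() of min(new size, old chunk size) bytes, and a
 * kfree() of the old block.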
1107 if (size < ZoneLimit) {
1108 zoneindex(&size, &align);
1109 if (z->z_ChunkSize == size)
1110 return(ptr);
1112 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
1113 return(NULL);
1114 bcopy(ptr, nptr, min(size, z->z_ChunkSize));
1115 kfree(ptr, type);
1116 return(nptr);
1120 * Return the kmalloc limit for this type, in bytes.
1122 long
1123 kmalloc_limit(struct malloc_type *type)
1125 KKASSERT(type->ks_limit != 0);
1126 return(type->ks_limit);
1130 * Allocate a copy of the specified string.
1132 * (MP SAFE) (MAY BLOCK)
1134 #ifdef SLAB_DEBUG
1135 char *
1136 kstrdup_debug(const char *str, struct malloc_type *type,
1137 const char *file, int line)
1138 #else
1139 char *
1140 kstrdup(const char *str, struct malloc_type *type)
1141 #endif
1143 int zlen; /* length inclusive of terminating NUL */
1144 char *nstr;
1146 if (str == NULL)
1147 return(NULL);
1148 zlen = strlen(str) + 1;
1149 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
1150 bcopy(str, nstr, zlen);
1151 return(nstr);
1154 #ifdef SLAB_DEBUG
1155 char *
1156 kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type,
1157 const char *file, int line)
1158 #else
1159 char *
1160 kstrndup(const char *str, size_t maxlen, struct malloc_type *type)
1161 #endif
1163 int zlen; /* length inclusive of terminating NUL */
1164 char *nstr;
1166 if (str == NULL)
1167 return(NULL);
1168 zlen = strnlen(str, maxlen) + 1;
1169 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
1170 bcopy(str, nstr, zlen);
1171 nstr[zlen - 1] = '\0';
1172 return(nstr);
1176 * Notify our cpu that a remote cpu has freed some chunks in a zone that
1177 * we own. RCount will be bumped so the memory should be good, but validate
1178 * that it really is.
1180 static void
1181 kfree_remote(void *ptr)
1183 SLGlobalData *slgd;
1184 SLZone *z;
1185 int nfree;
1186 int *kup;
1188 slgd = &mycpu->gd_slab;
1189 z = ptr;
1190 kup = btokup(z);
1191 KKASSERT(*kup == -((int)mycpuid + 1));
1192 KKASSERT(z->z_RCount > 0);
1193 atomic_subtract_int(&z->z_RCount, 1);
1195 logmemory(free_rem_beg, z, NULL, 0L, 0);
1196 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1197 KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
1198 nfree = z->z_NFree;
1201 * Indicate that we will no longer be off of the ZoneAry by
1202 * clearing RSignal.
1204 if (z->z_RChunks)
1205 z->z_RSignal = 0;
1208 * Atomically extract the bchunks list and then process it back
1209 * into the lchunks list. We want to append our bchunks to the
1210 * lchunks list and not prepend since we likely do not have
1211 * cache mastership of the related data (not that it helps since
1212 * we are using c_Next).
1214 clean_zone_rchunks(z);
1215 if (z->z_NFree && nfree == 0) {
1216 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1219 check_zone_free(slgd, z);
1220 logmemory(free_rem_end, z, NULL, 0L, 0);
1224 * free (SLAB ALLOCATOR)
1226 * Free a memory block previously allocated by malloc.
1228 * Note: We do not attempt to update ks_loosememuse as MP races could
1229 * prevent us from checking memory limits in malloc. YYY we may
1230 * consider updating ks_cpu.loosememuse.
1232 * MPSAFE
1234 void
1235 kfree(void *ptr, struct malloc_type *type)
1237 SLZone *z;
1238 SLChunk *chunk;
1239 SLGlobalData *slgd;
1240 struct globaldata *gd;
1241 int *kup;
1242 unsigned long size;
1243 SLChunk *bchunk;
1244 int rsignal;
1246 logmemory_quick(free_beg);
1247 gd = mycpu;
1248 slgd = &gd->gd_slab;
1250 if (ptr == NULL)
1251 panic("trying to free NULL pointer");
1254 * Handle special 0-byte allocations
1256 if (ptr == ZERO_LENGTH_PTR) {
1257 logmemory(free_zero, ptr, type, -1UL, 0);
1258 logmemory_quick(free_end);
1259 return;
1263 * Panic on bad malloc type
1265 if (type->ks_magic != M_MAGIC)
1266 panic("free: malloc type lacks magic");
1269 * Handle oversized allocations. XXX we really should require that a
1270 * size be passed to free() instead of this nonsense.
1272 * This code is never called via an ipi.
1274 kup = btokup(ptr);
1275 if (*kup > 0) {
1276 size = *kup << PAGE_SHIFT;
1277 *kup = 0;
1278 #ifdef INVARIANTS
1279 if (use_weird_array) {
1280 KKASSERT(sizeof(weirdary) <= size);
1281 bcopy(weirdary, ptr, sizeof(weirdary));
1283 #endif
1285 * NOTE: For oversized allocations we do not record the
1286 * originating cpu. It gets freed on the cpu calling
1287 * kfree(). The statistics are in aggregate.
1289 * note: XXX we have still inherited the interrupts-can't-block
1290 * assumption. An interrupt thread does not bump
1291 * gd_intr_nesting_level so check TDF_INTTHREAD. This is
1292 * primarily until we can fix softupdate's assumptions about free().
1294 crit_enter();
1295 --type->ks_use[gd->gd_cpuid].inuse;
1296 type->ks_use[gd->gd_cpuid].memuse -= size;
1297 if (mycpu->gd_intr_nesting_level ||
1298 (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
1299 logmemory(free_ovsz_delayed, ptr, type, size, 0);
1300 z = (SLZone *)ptr;
1301 z->z_Magic = ZALLOC_OVSZ_MAGIC;
1302 z->z_ChunkSize = size;
1304 TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
1305 crit_exit();
1306 } else {
1307 crit_exit();
1308 logmemory(free_ovsz, ptr, type, size, 0);
1309 kmem_slab_free(ptr, size); /* may block */
1311 logmemory_quick(free_end);
1312 return;
1316 * Zone case. Figure out the zone based on the fact that it is
1317 * ZoneSize aligned.
1319 z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1320 kup = btokup(z);
1321 KKASSERT(*kup < 0);
1322 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1325 * If we do not own the zone then use atomic ops to free to the
1326 * remote cpu linked list and notify the target zone using a
1327 * passive message.
1329 * The target zone cannot be deallocated while we own a chunk of it,
1330 * so the zone header's storage is stable until the very moment
1331 * we adjust z_RChunks. After that we cannot safely dereference (z).
1333 * (no critical section needed)
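 * In outline, the code below (1) adjusts inuse/memuse on the local cpu,
 * (2) samples z_RSignal and bumps z_RCount if it was set so the zone
 * cannot be torn down while we still reference it, (3) pushes the chunk
 * onto z_RChunks with a cmpset loop, and (4) if this was the NULL ->
 * non-NULL transition and RSignal was set, sends a passive IPI so the
 * owning cpu runs kfree_remote() and drains RChunks back into LChunks.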
1335 if (z->z_CpuGd != gd) {
1337 * Making these adjustments now allow us to avoid passing (type)
1338 * to the remote cpu. Note that inuse/memuse is being
1339 * adjusted on OUR cpu, not the zone cpu, but it should all still
1340 * sum up properly and cancel out.
1342 crit_enter();
1343 --type->ks_use[gd->gd_cpuid].inuse;
1344 type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
1345 crit_exit();
1348 * WARNING! This code competes with other cpus. Once we
1349 * successfully link the chunk to RChunks the remote
1350 * cpu can rip z's storage out from under us.
1352 * Bumping RCount prevents z's storage from getting
1353 * ripped out.
1355 rsignal = z->z_RSignal;
1356 cpu_lfence();
1357 if (rsignal)
1358 atomic_add_int(&z->z_RCount, 1);
1360 chunk = ptr;
1361 for (;;) {
1362 bchunk = z->z_RChunks;
1363 cpu_ccfence();
1364 chunk->c_Next = bchunk;
1365 cpu_sfence();
1367 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
1368 break;
1372 * We have to signal the remote cpu if our actions will cause
1373 * the remote zone to be placed back on ZoneAry so it can
1374 * move the zone back on.
1376 * We only need to deal with NULL->non-NULL RChunk transitions
1377 * and only if z_RSignal is set. We interlock by reading rsignal
1378 * before adding our chunk to RChunks. This should result in
1379 * virtually no IPI traffic.
1381 * We can use a passive IPI to reduce overhead even further.
1383 if (bchunk == NULL && rsignal) {
1384 logmemory(free_request, ptr, type,
1385 (unsigned long)z->z_ChunkSize, 0);
1386 lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
1387 /* z can get ripped out from under us from this point on */
1388 } else if (rsignal) {
1389 atomic_subtract_int(&z->z_RCount, 1);
1390 /* z can get ripped out from under us from this point on */
1392 logmemory_quick(free_end);
1393 return;
1397 * kfree locally
1399 logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);
1401 crit_enter();
1402 chunk = ptr;
1403 chunk_mark_free(z, chunk);
1406 * Put weird data into the memory to detect modifications after freeing,
1407 * illegal pointer use after freeing (we should fault on the odd address),
1408 * and so forth. XXX needs more work, see the old malloc code.
1410 #ifdef INVARIANTS
1411 if (use_weird_array) {
1412 if (z->z_ChunkSize < sizeof(weirdary))
1413 bcopy(weirdary, chunk, z->z_ChunkSize);
1414 else
1415 bcopy(weirdary, chunk, sizeof(weirdary));
1417 #endif
1420 * Add this free non-zero'd chunk to a linked list for reuse. Add
1421 * to the front of the linked list so it is more likely to be
1422 * reallocated, since it is already in our L1 cache.
1424 #ifdef INVARIANTS
1425 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
1426 panic("BADFREE %p", chunk);
1427 #endif
1428 chunk->c_Next = z->z_LChunks;
1429 z->z_LChunks = chunk;
1430 if (chunk->c_Next == NULL)
1431 z->z_LChunksp = &chunk->c_Next;
1433 #ifdef INVARIANTS
1434 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
1435 panic("BADFREE2");
1436 #endif
1439 * Bump the number of free chunks. If it becomes non-zero the zone
1440 * must be added back onto the appropriate list. A fully allocated
1441 * zone that sees its first free is considered 'mature' and is placed
1442 * at the head, giving the system time to potentially free the remaining
1443 * entries even while other allocations are going on and making the zone
1444 * freeable.
1446 if (z->z_NFree++ == 0)
1447 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1449 --type->ks_use[gd->gd_cpuid].inuse;
1450 type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
1452 check_zone_free(slgd, z);
1453 logmemory_quick(free_end);
1454 crit_exit();
1458 * Clean up slabs which are hanging around due to RChunks or which are wholly
1459 * free and can be moved to the free list if not moved by other means.
1461 * Called once every 10 seconds on all cpus.
1463 void
1464 slab_cleanup(void)
1466 SLGlobalData *slgd = &mycpu->gd_slab;
1467 SLZone *z;
1468 int i;
1470 crit_enter();
1471 for (i = 0; i < NZONES; ++i) {
1472 if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL)
1473 continue;
1476 * Scan zones.
1478 while (z) {
1480 * Shift all RChunks to the end of the LChunks list. This is
1481 * an O(1) operation.
1483 * Then free the zone if possible.
1485 clean_zone_rchunks(z);
1486 z = check_zone_free(slgd, z);
1489 crit_exit();
1492 #if defined(INVARIANTS)
1495 * Helper routines for sanity checks
1497 static void
1498 chunk_mark_allocated(SLZone *z, void *chunk)
1500 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1501 uint32_t *bitptr;
1503 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1504 KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1505 ("memory chunk %p bit index %d is illegal", chunk, bitdex));
1506 bitptr = &z->z_Bitmap[bitdex >> 5];
1507 bitdex &= 31;
1508 KASSERT((*bitptr & (1 << bitdex)) == 0,
1509 ("memory chunk %p is already allocated!", chunk));
1510 *bitptr |= 1 << bitdex;
1513 static void
1514 chunk_mark_free(SLZone *z, void *chunk)
1516 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1517 uint32_t *bitptr;
1519 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1520 KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1521 ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
1522 bitptr = &z->z_Bitmap[bitdex >> 5];
1523 bitdex &= 31;
1524 KASSERT((*bitptr & (1 << bitdex)) != 0,
1525 ("memory chunk %p is already free!", chunk));
1526 *bitptr &= ~(1 << bitdex);
1529 #endif
1532 * kmem_slab_alloc()
1534 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1535 * specified alignment. M_* flags are expected in the flags field.
1537 * Alignment must be a multiple of PAGE_SIZE.
1539 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1540 * but when we move zalloc() over to use this function as its backend
1541 * we will have to switch to kreserve/krelease and call reserve(0)
1542 * after the new space is made available.
1544 * Interrupt code which has preempted other code is not allowed to
1545 * use PQ_CACHE pages. However, if an interrupt thread is run
1546 * non-preemptively or blocks and then runs non-preemptively, then
1547 * it is free to use PQ_CACHE pages. <--- may not apply any longer XXX
1549 static void *
1550 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1552 vm_size_t i;
1553 vm_offset_t addr;
1554 int count, vmflags, base_vmflags;
1555 vm_page_t mbase = NULL;
1556 vm_page_t m;
1557 thread_t td;
1559 size = round_page(size);
1560 addr = vm_map_min(&kernel_map);
1562 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1563 crit_enter();
1564 vm_map_lock(&kernel_map);
1565 if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
1566 vm_map_unlock(&kernel_map);
1567 if ((flags & M_NULLOK) == 0)
1568 panic("kmem_slab_alloc(): kernel_map ran out of space!");
1569 vm_map_entry_release(count);
1570 crit_exit();
1571 return(NULL);
1575 * kernel_object maps 1:1 to kernel_map.
1577 vm_object_hold(&kernel_object);
1578 vm_object_reference_locked(&kernel_object);
1579 vm_map_insert(&kernel_map, &count,
1580 &kernel_object, NULL,
1581 addr, NULL,
1582 addr, addr + size,
1583 VM_MAPTYPE_NORMAL,
1584 VM_SUBSYS_KMALLOC,
1585 VM_PROT_ALL, VM_PROT_ALL, 0);
1586 vm_object_drop(&kernel_object);
1587 vm_map_set_wired_quick(&kernel_map, addr, size, &count);
1588 vm_map_unlock(&kernel_map);
1590 td = curthread;
1592 base_vmflags = 0;
1593 if (flags & M_ZERO)
1594 base_vmflags |= VM_ALLOC_ZERO;
1595 if (flags & M_USE_RESERVE)
1596 base_vmflags |= VM_ALLOC_SYSTEM;
1597 if (flags & M_USE_INTERRUPT_RESERVE)
1598 base_vmflags |= VM_ALLOC_INTERRUPT;
1599 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
1600 panic("kmem_slab_alloc: bad flags %08x (%p)",
1601 flags, ((int **)&size)[-1]);
1605 * Allocate the pages. Do not map them yet. VM_ALLOC_NORMAL can only
1606 * be set if we are not preempting.
1608 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1609 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1610 * implied in this case), though I'm not sure if we really need to
1611 * do that.
1613 vmflags = base_vmflags;
1614 if (flags & M_WAITOK) {
1615 if (td->td_preempted)
1616 vmflags |= VM_ALLOC_SYSTEM;
1617 else
1618 vmflags |= VM_ALLOC_NORMAL;
1621 vm_object_hold(&kernel_object);
1622 for (i = 0; i < size; i += PAGE_SIZE) {
1623 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
1624 if (i == 0)
1625 mbase = m;
1628 * If the allocation failed we either return NULL or we retry.
1630 * If M_WAITOK is specified we wait for more memory and retry.
1631 * If M_WAITOK is specified from a preemption we yield instead of
1632 * wait. Livelock will not occur because the interrupt thread
1633 * will not be preempting anyone the second time around after the
1634 * yield.
1636 if (m == NULL) {
1637 if (flags & M_WAITOK) {
1638 if (td->td_preempted) {
1639 lwkt_switch();
1640 } else {
1641 vm_wait(0);
1643 i -= PAGE_SIZE; /* retry */
1644 continue;
1646 break;
1651 * Check and deal with an allocation failure
1653 if (i != size) {
1654 while (i != 0) {
1655 i -= PAGE_SIZE;
1656 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1657 /* page should already be busy */
1658 vm_page_free(m);
1660 vm_map_lock(&kernel_map);
1661 vm_map_delete(&kernel_map, addr, addr + size, &count);
1662 vm_map_unlock(&kernel_map);
1663 vm_object_drop(&kernel_object);
1665 vm_map_entry_release(count);
1666 crit_exit();
1667 return(NULL);
1671 * Success!
1673 * NOTE: The VM pages are still busied. mbase points to the first one
1674 * but we have to iterate via vm_page_next()
1676 vm_object_drop(&kernel_object);
1677 crit_exit();
1680 * Enter the pages into the pmap and deal with M_ZERO.
1682 m = mbase;
1683 i = 0;
1685 while (i < size) {
1687 * page should already be busy
1689 m->valid = VM_PAGE_BITS_ALL;
1690 vm_page_wire(m);
1691 pmap_enter(&kernel_pmap, addr + i, m,
1692 VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL);
1693 if (flags & M_ZERO)
1694 pagezero((char *)addr + i);
1695 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
1696 vm_page_flag_set(m, PG_REFERENCED);
1697 vm_page_wakeup(m);
1699 i += PAGE_SIZE;
1700 vm_object_hold(&kernel_object);
1701 m = vm_page_next(m);
1702 vm_object_drop(&kernel_object);
1704 smp_invltlb();
1705 vm_map_entry_release(count);
1706 return((void *)addr);
1710 * kmem_slab_free()
1712 static void
1713 kmem_slab_free(void *ptr, vm_size_t size)
1715 crit_enter();
1716 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1717 crit_exit();