kernel - Fix excessive call stack depth on stuck interrupt
sys/kern/kern_slaballoc.c
/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 * Alloc Size     Chunking   Number of zones
 *
 * 0-127          8          16
 * 128-255        16         8
 * 256-511        32         8
 * 512-1023       64         8
 * 1024-2047      128        8
 * 2048-4095      256        8
 * 4096-8191      512        8
 * 8192-16383     1024       8
 * 16384-32767    2048       8
 *
 * (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 * Allocations >= ZoneLimit go directly to kmem.
 *
 * Alignment properties:
 * - All power-of-2 sized allocations are power-of-2 aligned.
 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *   power-of-2 round up of 'size'.
 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
 *   above table 'Chunking' column).
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 * have remained compatible with the following API requirements:
 *
 * + malloc(0) is allowed and returns non-NULL (ahc driver)
 * + ability to allocate arbitrarily large chunks of memory
 */
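
/*
 * Illustrative walk-through of the table above (added commentary, not part
 * of the original sources): kmalloc(100) lands in the 0-127 band, so the
 * request is rounded up to the next 8-byte chunk boundary (104 bytes) and
 * is served from the per-cpu zone dedicated to 104-byte chunks.
 * kmalloc(3000) lands in the 2048-4095 band and is rounded up to a
 * 3072-byte chunk.  Requests at or above ZoneLimit bypass the zones
 * entirely and go to kmem_slab_alloc().
 */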
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>
#define btokup(z)    (&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING    "ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS      void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY    KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)    \
    KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)                      \
    KTR_LOG(memory_ ## name)
/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;                /* in KB */
static int ZoneGenAlloc;                /* in KB */
struct malloc_type *kmemstatistics;     /* exported to vmstat */

#ifdef INVARIANTS
static int32_t weirdary[16];
#endif
static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif
/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH    32    /* threshold number of zones */

#ifdef INVARIANTS
/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR        0xdeadc0de
#endif
#define ZERO_LENGTH_PTR   ((void *)-8)
/*
 * Misc global malloc buckets
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
           &use_malloc_pattern, 0,
           "Initialize memory to -1 if M_ZERO not specified");
#endif
static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
static long SlabsAllocated;
static long SlabsFreed;
SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD,
            &SlabsAllocated, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD,
            &SlabsFreed, 0, "");
static int SlabFreeToTail;
SYSCTL_INT(_kern, OID_AUTO, slab_freetotail, CTLFLAG_RW,
           &SlabFreeToTail, 0, "");

static struct spinlock kmemstat_spin =
    SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");
/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
    size_t limsize;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;
    return (limsize / (1024 * 1024));
}
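
/*
 * Example (added commentary): on a machine with roughly 8GB of ram and a
 * larger KVM space, kmem_lim_size() returns about 8192 (MB).  That value
 * also trips the first ZoneRelsThresh doubling in kmeminit() below
 * (limsize >= 7 * 1024), and malloc_init() sizes each pool's default
 * ks_limit to one tenth of it.
 */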
static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
#ifdef INVARIANTS
    int i;
#endif

    limsize = kmem_lim_size();
    usesize = (int)(limsize * 1024);    /* convert to KB */

    /*
     * If the machine has a large KVM space and more than 8G of ram,
     * double the zone release threshold to reduce SMP invalidations.
     * If more than 16G of ram, do it again.
     *
     * The BIOS eats a little ram so add some slop.  We want 8G worth of
     * memory sticks to trigger the first adjustment.
     */
    if (ZoneRelsThresh == ZONE_RELS_THRESH) {
        if (limsize >= 7 * 1024)
            ZoneRelsThresh *= 2;
        if (limsize >= 15 * 1024)
            ZoneRelsThresh *= 2;
    }

    /*
     * Calculate the zone size.  This typically calculates to
     * ZALLOC_MAX_ZONE_SIZE
     */
    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;

#ifdef INVARIANTS
    for (i = 0; i < NELEM(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;
#endif

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
        kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
/*
 * (low level) Initialize slab-related elements in the globaldata structure.
 *
 * Occurs after kmeminit().
 */
void
slab_gdinit(globaldata_t gd)
{
    SLGlobalData *slgd;
    int i;

    slgd = &gd->gd_slab;
    for (i = 0; i < NZONES; ++i)
        TAILQ_INIT(&slgd->ZoneAry[i]);
    TAILQ_INIT(&slgd->FreeZones);
    TAILQ_INIT(&slgd->FreeOvZones);
}
/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = kmem_lim_size() * (1024 * 1024);
    type->ks_limit = limsize / 10;

    spin_lock(&kmemstat_spin);
    type->ks_next = kmemstatistics;
    kmemstatistics = type;
    spin_unlock(&kmemstat_spin);
}
void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_use[i].memuse;
    if (ttl) {
        kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
            ttl, type->ks_shortdesc, i);
    }
#endif

    spin_lock(&kmemstat_spin);
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
    spin_unlock(&kmemstat_spin);
}
/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
        malloc_init(type);
    if (bytes == 0)
        bytes = KvaSize;
    if (type->ks_limit < bytes)
        type->ks_limit = bytes;
}
void
kmalloc_set_unlimited(struct malloc_type *type)
{
    type->ks_limit = kmem_lim_size() * (1024 * 1024);
}
/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
        type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
        type->ks_magic = M_MAGIC;
        type->ks_shortdesc = descr;
        malloc_init(type);
        *typep = type;
    }
}
/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
        malloc_uninit(*typep);
        kfree(*typep, M_TEMP);
        *typep = NULL;
    }
}
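
/*
 * Minimal usage sketch of the dynamic pool API above (added commentary;
 * 'mydata_pool' and the size are hypothetical, not from this file):
 */
#if 0
static struct malloc_type *mydata_pool;

static void
mydata_pool_example(void)
{
    void *buf;

    kmalloc_create(&mydata_pool, "mydata");    /* NOP if already created */
    buf = kmalloc(512, mydata_pool, M_WAITOK | M_ZERO);
    /* ... use buf ... */
    kfree(buf, mydata_pool);
    kmalloc_destroy(&mydata_pool);             /* pool pointer reset to NULL */
}
#endif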
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
    unsigned int n = (unsigned int)*bytes;    /* unsigned for shift opt */

    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        *align = 8;
        return(n / 8 - 1);        /* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        *align = 16;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            *align = 32;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            *align = 64;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            *align = 128;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            *align = 256;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        *align = 512;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        *align = 1024;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        *align = 2048;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
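
/*
 * Worked examples for zoneindex() (added commentary): 100 bytes rounds to
 * 104 with 8-byte alignment and returns index 12 (104/8 - 1); 200 bytes
 * rounds to 208 with 16-byte alignment and returns index 20 (208/16 + 7);
 * 3000 bytes rounds to 3072 with 256-byte alignment and returns index 51
 * (3072/256 + 39).  The +7, +15, ... offsets keep the per-band indices
 * contiguous within the NZONES array.
 */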
static __inline
void
clean_zone_rchunks(SLZone *z)
{
    SLChunk *bchunk;

    while ((bchunk = z->z_RChunks) != NULL) {
        cpu_ccfence();
        if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
            *z->z_LChunksp = bchunk;
            while (bchunk) {
                chunk_mark_free(z, bchunk);
                z->z_LChunksp = &bchunk->c_Next;
                bchunk = bchunk->c_Next;
                ++z->z_NFree;
            }
            break;
        }
        /* retry */
    }
}
/*
 * If the zone becomes totally free and is not the only zone listed for a
 * chunk size we move it to the FreeZones list.  We always leave at least
 * one zone per chunk size listed, even if it is freeable.
 *
 * Do not move the zone if there is an IPI in_flight (z_RCount != 0),
 * otherwise MP races can result in our free_remote code accessing a
 * destroyed zone.  The remote end interlocks z_RCount with z_RChunks
 * so one has to test both z_NFree and z_RCount.
 *
 * Since this code can be called from an IPI callback, do *NOT* try to mess
 * with kernel_map here.  Hysteresis will be performed at kmalloc() time.
 */
static __inline
SLZone *
check_zone_free(SLGlobalData *slgd, SLZone *z)
{
    SLZone *znext;

    znext = TAILQ_NEXT(z, z_Entry);
    if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
        (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)
    ) {
        int *kup;

        TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);

        z->z_Magic = -1;
        TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
        ++slgd->NFreeZones;
        kup = btokup(z);
        *kup = 0;
    }
    return znext;
}
#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */
static void
slab_record_source(SLZone *z, const char *file, int line)
{
    int i;
    int b = line & (SLAB_DEBUG_ENTRIES - 1);

    i = b;
    do {
        if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
            return;
        if (z->z_Sources[i].file == NULL)
            break;
        i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
    } while (i != b);
    z->z_Sources[i].file = file;
    z->z_Sources[i].line = line;
}

#endif
static __inline unsigned long
powerof2_size(unsigned long size)
{
    int i;

    if (size == 0 || powerof2(size))
        return size;

    i = flsl(size);
    return (1UL << i);
}
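
/*
 * Examples (added commentary): powerof2_size(4096) returns 4096 unchanged
 * since it is already a power of 2, while powerof2_size(4097) returns 8192
 * (flsl(4097) == 13, so 1UL << 13).  A size of 0 is also returned
 * unchanged and is handled by the degenerate-size path in kmalloc().
 */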
/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 * &SlabMisc if you don't care.
 *
 * M_RNOWAIT	- don't block.
 * M_NULLOK	- return NULL instead of blocking.
 * M_ZERO	- zero the returned memory.
 * M_USE_RESERVE - allow greater drawdown of the free list
 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 * M_POWEROF2	- roundup size to the nearest power of 2
 *
 * MPSAFE
 */
#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
              const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    unsigned long align;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;
    if (flags & M_POWEROF2)
        size = powerof2_size(size);

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_use[cpu].memuse.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_use[i].memuse;
        type->ks_loosememuse = ttl;    /* not MP synchronized */
        if ((ssize_t)ttl < 0)          /* deal with occasional race */
            ttl = 0;
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK) {
                logmemory(malloc_end, NULL, type, size, flags);
                return(NULL);
            }
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }
    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
        logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
        return(ZERO_LENGTH_PTR);
    }
    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     *
     * NOTE: ku_pagecnt must be cleared before we free the slab or we
     *       might race another cpu allocating the kva and setting
     *       ku_pagecnt.
     */
    while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZoneRelsThresh) {    /* crit sect race */
            int *kup;

            z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
            KKASSERT(z != NULL);
            TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
            --slgd->NFreeZones;
            kup = btokup(z);
            *kup = 0;
            kmem_slab_free(z, ZoneSize);    /* may block */
            atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from kfree().
     */
    while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
            vm_size_t tsize;

            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
            tsize = z->z_ChunkSize;
            kmem_slab_free(z, tsize);    /* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
        }
        crit_exit();
    }
    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on a SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
        int *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL) {
            logmemory(malloc_end, NULL, type, size, flags);
            return(NULL);
        }
        atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
        flags &= ~M_ZERO;    /* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        *kup = size / PAGE_SIZE;
        crit_enter();
        goto done;
    }
    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size, &align);
    KKASSERT(zi < NZONES);
    crit_enter();

    if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) {
        /*
         * Locate a chunk - we have to have at least one.  If this is the
         * last chunk go ahead and do the work to retrieve chunks freed
         * from remote cpus, and if the zone is still empty move it off
         * the ZoneAry.
         */
        if (--z->z_NFree <= 0) {
            KKASSERT(z->z_NFree == 0);

            /*
             * WARNING! This code competes with other cpus.  It is ok
             * for us to not drain RChunks here but we might as well, and
             * it is ok if more accumulate after we're done.
             *
             * Set RSignal before pulling rchunks off, indicating that we
             * will be moving ourselves off of the ZoneAry.  Remote ends will
             * read RSignal before putting rchunks on thus interlocking
             * their IPI signaling.
             */
            if (z->z_RChunks == NULL)
                atomic_swap_int(&z->z_RSignal, 1);

            clean_zone_rchunks(z);

            /*
             * Remove from the zone list if no free chunks remain.
             * Clear RSignal
             */
            if (z->z_NFree == 0) {
                TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry);
            } else {
                z->z_RSignal = 0;
            }
        }

        /*
         * Fast path, we have chunks available in z_LChunks.
         */
        chunk = z->z_LChunks;
        if (chunk) {
            chunk_mark_allocated(z, chunk);
            z->z_LChunks = chunk->c_Next;
            if (z->z_LChunks == NULL)
                z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
            slab_record_source(z, file, line);
#endif
            goto done;
        }

        /*
         * No chunks are available in LChunks, the free chunk MUST be
         * in the never-before-used memory area, controlled by UIndex.
         *
         * The consequences are very serious if our zone got corrupted so
         * we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            ++z->z_UIndex;
        else
            z->z_UIndex = 0;

        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");

        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
        slab_record_source(z, file, line);
#endif
        goto done;
    }
    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zerod pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;
        int *kup;

        if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) {
            TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
            atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
        }

        /*
         * How big is the base structure?
         */
#if defined(INVARIANTS)
        /*
         * Make room for z_Bitmap.  An exact calculation is somewhat more
         * complicated so don't make an exact calculation.
         */
        off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
        bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
        off = sizeof(SLZone);
#endif

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise properly align the data according to the chunk size.
         */
        if (powerof2(size))
            align = size;
        off = roundup2(off, align);

        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
        bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
        bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;    /* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }
        kup = btokup(z);
        *kup = -(z->z_Cpu + 1);    /* -1 to -(N+1) */
        chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
        slab_record_source(z, file, line);
#endif

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                        & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_use[gd->gd_cpuid].inuse;
    type->ks_use[gd->gd_cpuid].memuse += size;
    type->ks_loosememuse += size;    /* not MP synchronized */
    crit_exit();

    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
        if (use_malloc_pattern) {
            for (i = 0; i < size; i += sizeof(int)) {
                *(int *)((char *)chunk + i) = -1;
            }
        }
        chunk->c_Next = (void *)-1;    /* avoid accidental double-free check */
    }
#endif
    logmemory(malloc_end, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc_end, NULL, type, size, flags);
    return(NULL);
}
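
/*
 * Caller's-eye sketch of the flag combinations documented above (added
 * commentary; the sizes and use of the M_TEMP pool are illustrative):
 */
#if 0
static void
kmalloc_flags_example(void)
{
    void *p;

    /* Blocks until memory is available. */
    p = kmalloc(1024, M_TEMP, M_WAITOK);
    kfree(p, M_TEMP);

    /* Never blocks; the caller must handle a NULL return. */
    p = kmalloc(1024, M_TEMP, M_RNOWAIT | M_NULLOK);
    if (p != NULL)
        kfree(p, M_TEMP);

    /* Size rounded up to a power of 2 (1500 -> 2048). */
    p = kmalloc(1500, M_TEMP, M_WAITOK | M_POWEROF2);
    kfree(p, M_TEMP);
}
#endif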
/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
               struct malloc_type *type, int flags,
               const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
    unsigned long osize;
    unsigned long align;
    SLZone *z;
    void *nptr;
    int *kup;

    KKASSERT((flags & M_ZERO) == 0);    /* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(kmalloc_debug(size, type, flags, file, line));
    if (size == 0) {
        kfree(ptr, type);
        return(NULL);
    }
    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
        osize = *kup << PAGE_SHIFT;
        if (osize == round_page(size))
            return(ptr);
        if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
            return(NULL);
        bcopy(ptr, nptr, min(size, osize));
        kfree(ptr, type);
        return(nptr);
    }
    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
        zoneindex(&size, &align);
        if (z->z_ChunkSize == size)
            return(ptr);
    }
    if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}
/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    return(type->ks_limit);
}
/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
              const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
    int zlen;    /* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
    bcopy(str, nstr, zlen);
    return(nstr);
}
#ifdef SLAB_DEBUG
char *
kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type,
               const char *file, int line)
#else
char *
kstrndup(const char *str, size_t maxlen, struct malloc_type *type)
#endif
{
    int zlen;    /* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strnlen(str, maxlen) + 1;
    nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
    bcopy(str, nstr, zlen);
    nstr[zlen - 1] = '\0';
    return(nstr);
}
/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static
void
kfree_remote(void *ptr)
{
    SLGlobalData *slgd;
    SLZone *z;
    int nfree;
    int *kup;

    slgd = &mycpu->gd_slab;
    z = ptr;
    kup = btokup(z);
    KKASSERT(*kup == -((int)mycpuid + 1));
    KKASSERT(z->z_RCount > 0);
    atomic_subtract_int(&z->z_RCount, 1);

    logmemory(free_rem_beg, z, NULL, 0L, 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
    KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
    nfree = z->z_NFree;

    /*
     * Indicate that we will no longer be off of the ZoneAry by
     * clearing RSignal.
     */
    if (z->z_RChunks)
        z->z_RSignal = 0;

    /*
     * Atomically extract the bchunks list and then process it back
     * into the lchunks list.  We want to append our bchunks to the
     * lchunks list and not prepend since we likely do not have
     * cache mastership of the related data (not that it helps since
     * we are using c_Next).
     */
    clean_zone_rchunks(z);
    if (z->z_NFree && nfree == 0) {
        TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
    }

    /*
     * If the zone becomes totally free and is not the only zone listed for a
     * chunk size we move it to the FreeZones list.  We always leave at least
     * one zone per chunk size listed, even if it is freeable.
     *
     * Since this code can be called from an IPI callback, do *NOT* try to
     * mess with kernel_map here.  Hysteresis will be performed at malloc()
     * time.
     *
     * Do not move the zone if there is an IPI in_flight (z_RCount != 0),
     * otherwise MP races can result in our free_remote code accessing a
     * destroyed zone.  The remote end interlocks z_RCount with z_RChunks
     * so one has to test both z_NFree and z_RCount.
     */
    if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
        (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z ||
         TAILQ_NEXT(z, z_Entry))
    ) {
        int *kup;

        TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
        z->z_Magic = -1;
        TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
        ++slgd->NFreeZones;
        kup = btokup(z);
        *kup = 0;
    }
    logmemory(free_rem_end, z, NULL, 0L, 0);
}
/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int *kup;
    unsigned long size;
    SLChunk *bchunk;
    int rsignal;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");
    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
        logmemory(free_zero, ptr, type, -1UL, 0);
        logmemory_quick(free_end);
        return;
    }

    /*
     * Panic on bad malloc type
     */
    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");
    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
        size = *kup << PAGE_SHIFT;
        *kup = 0;
#ifdef INVARIANTS
        KKASSERT(sizeof(weirdary) <= size);
        bcopy(weirdary, ptr, sizeof(weirdary));
#endif
        /*
         * NOTE: For oversized allocations we do not record the
         *       originating cpu.  It gets freed on the cpu calling
         *       kfree().  The statistics are in aggregate.
         *
         * note: XXX we have still inherited the interrupts-can't-block
         *       assumption.  An interrupt thread does not bump
         *       gd_intr_nesting_level so check TDF_INTTHREAD.  This is
         *       primarily until we can fix softupdate's assumptions
         *       about free().
         */
        crit_enter();
        --type->ks_use[gd->gd_cpuid].inuse;
        type->ks_use[gd->gd_cpuid].memuse -= size;
        if (mycpu->gd_intr_nesting_level ||
            (gd->gd_curthread->td_flags & TDF_INTTHREAD))
        {
            logmemory(free_ovsz_delayed, ptr, type, size, 0);
            z = (SLZone *)ptr;
            z->z_Magic = ZALLOC_OVSZ_MAGIC;
            z->z_ChunkSize = size;

            TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
            crit_exit();
        } else {
            crit_exit();
            logmemory(free_ovsz, ptr, type, size, 0);
            kmem_slab_free(ptr, size);    /* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
        }
        logmemory_quick(free_end);
        return;
    }
    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
    /*
     * If we do not own the zone then use atomic ops to free to the
     * remote cpu linked list and notify the target zone using a
     * passive message.
     *
     * The target zone cannot be deallocated while we own a chunk of it,
     * so the zone header's storage is stable until the very moment
     * we adjust z_RChunks.  After that we cannot safely dereference (z).
     *
     * (no critical section needed)
     */
    if (z->z_CpuGd != gd) {
        /*
         * Making these adjustments now allow us to avoid passing (type)
         * to the remote cpu.  Note that inuse/memuse is being
         * adjusted on OUR cpu, not the zone cpu, but it should all still
         * sum up properly and cancel out.
         */
        crit_enter();
        --type->ks_use[gd->gd_cpuid].inuse;
        type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
        crit_exit();

        /*
         * WARNING! This code competes with other cpus.  Once we
         *          successfully link the chunk to RChunks the remote
         *          cpu can rip z's storage out from under us.
         *
         *          Bumping RCount prevents z's storage from getting
         *          ripped out.
         */
        rsignal = z->z_RSignal;
        cpu_lfence();
        if (rsignal)
            atomic_add_int(&z->z_RCount, 1);

        chunk = ptr;
        for (;;) {
            bchunk = z->z_RChunks;
            cpu_ccfence();
            chunk->c_Next = bchunk;
            cpu_sfence();

            if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
                break;
        }

        /*
         * We have to signal the remote cpu if our actions will cause
         * the remote zone to be placed back on ZoneAry so it can
         * move the zone back on.
         *
         * We only need to deal with NULL->non-NULL RChunk transitions
         * and only if z_RSignal is set.  We interlock by reading rsignal
         * before adding our chunk to RChunks.  This should result in
         * virtually no IPI traffic.
         *
         * We can use a passive IPI to reduce overhead even further.
         */
        if (bchunk == NULL && rsignal) {
            logmemory(free_request, ptr, type,
                      (unsigned long)z->z_ChunkSize, 0);
            lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
            /* z can get ripped out from under us from this point on */
        } else if (rsignal) {
            atomic_subtract_int(&z->z_RCount, 1);
            /* z can get ripped out from under us from this point on */
        }
        logmemory_quick(free_end);
        return;
    }
    /*
     * kfree locally
     */
    logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

    crit_enter();
    chunk = ptr;
    chunk_mark_free(z, chunk);

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse.  Add
     * to the front of the linked list so it is more likely to be
     * reallocated, since it is already in our L1 cache.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_LChunks;
    z->z_LChunks = chunk;
    if (chunk->c_Next == NULL)
        z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
        panic("BADFREE2");
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.  A fully allocated
     * zone that sees its first free is considered 'mature' and is placed
     * at the head, giving the system time to potentially free the remaining
     * entries even while other allocations are going on and making the zone
     * freeable.
     */
    if (z->z_NFree++ == 0) {
        if (SlabFreeToTail)
            TAILQ_INSERT_TAIL(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
        else
            TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
    }

    --type->ks_use[z->z_Cpu].inuse;
    type->ks_use[z->z_Cpu].memuse -= z->z_ChunkSize;

    check_zone_free(slgd, z);
    logmemory_quick(free_end);
    crit_exit();
}
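
/*
 * Summary of the cross-cpu free protocol above (added commentary): a
 * non-owning cpu (1) charges the free against its own per-cpu statistics,
 * (2) snapshots z_RSignal and, if set, bumps z_RCount to pin the zone
 * header, (3) pushes the chunk onto z_RChunks with an atomic cmpset loop,
 * and (4) sends a passive IPI to the owning cpu only on a NULL->non-NULL
 * z_RChunks transition while z_RSignal was set.  The owner then drains
 * z_RChunks into z_LChunks via kfree_remote()/clean_zone_rchunks().
 */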
/*
 * Cleanup slabs which are hanging around due to RChunks or which are wholly
 * free and can be moved to the free list if not moved by other means.
 *
 * Called once every 10 seconds on all cpus.
 */
void
slab_cleanup(void)
{
    SLGlobalData *slgd = &mycpu->gd_slab;
    SLZone *z;
    int i;

    crit_enter();
    for (i = 0; i < NZONES; ++i) {
        if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL)
            continue;

        /*
         * Scan zones.
         */
        while (z) {
            /*
             * Shift all RChunks to the end of the LChunks list.  This is
             * an O(1) operation.
             *
             * Then free the zone if possible.
             */
            clean_zone_rchunks(z);
            z = check_zone_free(slgd, z);
        }
    }
    crit_exit();
}
#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
            ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
            ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
            ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
            ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif
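
/*
 * Bitmap indexing example (added commentary): a chunk at index 100 within
 * its zone selects word z_Bitmap[3] (100 >> 5) and bit 4 (100 & 31) of
 * that word.  Allocation sets the bit, free clears it, and the KASSERTs
 * above catch double-allocation and double-free.
 */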
/*
 * kmem_slab_alloc()
 *
 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 * specified alignment.  M_* flags are expected in the flags field.
 *
 * Alignment must be a multiple of PAGE_SIZE.
 *
 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 * but when we move zalloc() over to use this function as its backend
 * we will have to switch to kreserve/krelease and call reserve(0)
 * after the new space is made available.
 *
 * Interrupt code which has preempted other code is not allowed to
 * use PQ_CACHE pages.  However, if an interrupt thread is run
 * non-preemptively or blocks and then runs non-preemptively, then
 * it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    vm_page_t mbase = NULL;
    vm_page_t m;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
        vm_map_unlock(&kernel_map);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        vm_map_entry_release(count);
        crit_exit();
        return(NULL);
    }
    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_hold(&kernel_object);
    vm_object_reference_locked(&kernel_object);
    vm_map_insert(&kernel_map, &count,
                  &kernel_object, NULL,
                  addr, addr, addr + size,
                  VM_MAPTYPE_NORMAL,
                  VM_SUBSYS_KMALLOC,
                  VM_PROT_ALL, VM_PROT_ALL, 0);
    vm_object_drop(&kernel_object);
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    vm_map_unlock(&kernel_map);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
        panic("kmem_slab_alloc: bad flags %08x (%p)",
              flags, ((int **)&size)[-1]);
    }
    /*
     * Allocate the pages.  Do not map them yet.  VM_ALLOC_NORMAL can only
     * be set if we are not preempting.
     *
     * VM_ALLOC_SYSTEM is automatically set if we are preempting and
     * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
     * implied in this case), though I'm not sure if we really need to
     * do that.
     */
    vmflags = base_vmflags;
    if (flags & M_WAITOK) {
        if (td->td_preempted)
            vmflags |= VM_ALLOC_SYSTEM;
        else
            vmflags |= VM_ALLOC_NORMAL;
    }

    vm_object_hold(&kernel_object);
    for (i = 0; i < size; i += PAGE_SIZE) {
        m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
        if (i == 0)
            mbase = m;

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    lwkt_switch();
                } else {
                    vm_wait(0);
                }
                i -= PAGE_SIZE;    /* retry */
                continue;
            }
            break;
        }
    }
    /*
     * Check and deal with an allocation failure
     */
    if (i != size) {
        while (i != 0) {
            i -= PAGE_SIZE;
            m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
            /* page should already be busy */
            vm_page_free(m);
        }
        vm_map_lock(&kernel_map);
        vm_map_delete(&kernel_map, addr, addr + size, &count);
        vm_map_unlock(&kernel_map);
        vm_object_drop(&kernel_object);

        vm_map_entry_release(count);
        crit_exit();
        return(NULL);
    }
    /*
     * Success!
     *
     * NOTE: The VM pages are still busied.  mbase points to the first one
     *       but we have to iterate via vm_page_next()
     */
    vm_object_drop(&kernel_object);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with M_ZERO.
     */
    m = mbase;
    i = 0;

    while (i < size) {
        /*
         * page should already be busy
         */
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        pmap_enter(&kernel_pmap, addr + i, m,
                   VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL);
        if (flags & M_ZERO)
            pagezero((char *)addr + i);
        KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
        vm_page_flag_set(m, PG_REFERENCED);
        vm_page_wakeup(m);

        i += PAGE_SIZE;
        vm_object_hold(&kernel_object);
        m = vm_page_next(m);
        vm_object_drop(&kernel_object);
    }
    smp_invltlb();
    vm_map_entry_release(count);
    atomic_add_long(&SlabsAllocated, 1);
    return((void *)addr);
}
/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    atomic_add_long(&SlabsFreed, 1);
    crit_exit();
}
void *
kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
                   int flags)
{
#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)    ((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)    ((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)    ((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

    void *ret;

    if (size_alloc < __VM_CACHELINE_SIZE)
        size_alloc = __VM_CACHELINE_SIZE;
    else if (!CAN_CACHEALIGN(size_alloc))
        flags |= M_POWEROF2;

    ret = kmalloc(size_alloc, type, flags);
    KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
            ("%p(%lu) not cacheline %d aligned",
             ret, size_alloc, __VM_CACHELINE_SIZE));
    return ret;

#undef CAN_CACHEALIGN
}
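
/*
 * Usage sketch for kmalloc_cachealign() (added commentary; the sizes are
 * illustrative): a 100-byte request is bumped up to __VM_CACHELINE_SIZE,
 * while a 300-byte request on a 64-byte-cacheline machine falls below the
 * 512-byte CAN_CACHEALIGN cutoff and is rounded to 512 via M_POWEROF2.
 * Either way the KASSERT verifies cacheline alignment of the result.
 */
#if 0
static void
cachealign_example(void)
{
    void *p;

    p = kmalloc_cachealign(300, M_TEMP, M_WAITOK);
    kfree(p, M_TEMP);
}
#endif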