/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *	Alignment properties:
 *	- All power-of-2 sized allocations are power-of-2 aligned.
 *	- Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *	  power-of-2 round up of 'size'.
 *	- Non-power-of-2 sized allocations are zone chunk size aligned (see
 *	  the above table 'Chunking' column).
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
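/*
 * Illustrative sketch only (compiled out): how the zero-length and
 * alignment rules described above look from a caller's perspective.
 * The concrete rounded sizes in the comments are assumptions derived
 * from the chunking table; they are not asserted anywhere in this file.
 */
#if 0
static void
slab_api_example(struct malloc_type *mtype)
{
    void *p;

    p = kmalloc(0, mtype, M_WAITOK);		/* legal, returns non-NULL */
    kfree(p, mtype);				/* legal on the 0-byte cookie */

    p = kmalloc(512, mtype, M_WAITOK);		/* power-of-2 size: 512-aligned */
    kfree(p, mtype);

    p = kmalloc(100, mtype, M_WAITOK | M_POWEROF2);
    /* rounded up to 128 bytes and 128-byte aligned */
    kfree(p, mtype);
}
#endif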
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>
#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)
/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif
/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32	/* threshold number of zones */

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)
/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0,
	   "Initialize memory to -1 if M_ZERO not specified");
static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
static long SlabsAllocated;
static long SlabsFreed;
SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD,
	    &SlabsAllocated, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD,
	    &SlabsFreed, 0, "");
/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
    size_t limsize;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;
    return (limsize / (1024 * 1024));
}
static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;

    limsize = kmem_lim_size();
    usesize = (int)(limsize * 1024);	/* convert to KB */

    /*
     * If the machine has a large KVM space and more than 8G of ram,
     * double the zone release threshold to reduce SMP invalidations.
     * If more than 16G of ram, do it again.
     *
     * The BIOS eats a little ram so add some slop.  We want 8G worth of
     * memory sticks to trigger the first adjustment.
     */
    if (ZoneRelsThresh == ZONE_RELS_THRESH) {
	if (limsize >= 7 * 1024)
	    ZoneRelsThresh *= 2;
	if (limsize >= 15 * 1024)
	    ZoneRelsThresh *= 2;
    }

    /*
     * Calculate the zone size.  This typically calculates to
     * ZALLOC_MAX_ZONE_SIZE
     */
    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;

    for (i = 0; i < NELEM(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = kmem_lim_size() * (1024 * 1024);
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}
void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
		ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}
/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
	malloc_init(type);
    if (type->ks_limit < bytes)
	type->ks_limit = bytes;
}
/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already assigned.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
	type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
	type->ks_magic = M_MAGIC;
	type->ks_shortdesc = descr;
	malloc_init(type);
	*typep = type;
    }
}
/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
	malloc_uninit(*typep);
	kfree(*typep, M_TEMP);
	*typep = NULL;
    }
}
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */

    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	*align = 8;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	*align = 16;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    *align = 32;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    *align = 64;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    *align = 128;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    *align = 256;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	*align = 512;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	*align = 1024;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	*align = 2048;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
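/*
 * Sketch of how zoneindex() maps a couple of request sizes (compiled out,
 * for illustration only).  The expected values in the comments follow
 * directly from the rounding expressions above; they are assumptions, not
 * assertions made anywhere in this file.
 */
#if 0
static void
zoneindex_example(void)
{
    unsigned long size, align;
    int zi;

    size = 100;				/* 0-127 bucket, 8 byte chunking */
    zi = zoneindex(&size, &align);	/* size -> 104, align -> 8, zi == 12 */

    size = 3000;			/* 2048-4095 bucket, 256 byte chunking */
    zi = zoneindex(&size, &align);	/* size -> 3072, align -> 256, zi == 51 */
}
#endif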
static __inline void
clean_zone_rchunks(SLZone *z)
{
    SLChunk *bchunk;

    while ((bchunk = z->z_RChunks) != NULL) {
	cpu_ccfence();
	if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
	    *z->z_LChunksp = bchunk;
	    while (bchunk) {
		chunk_mark_free(z, bchunk);
		z->z_LChunksp = &bchunk->c_Next;
		bchunk = bchunk->c_Next;
		++z->z_NFree;
	    }
	    break;
	}
	/* retry */
    }
}
/*
 * If the zone becomes totally free, and there are other zones we
 * can allocate from, move this zone to the FreeZones list.  Since
 * this code can be called from an IPI callback, do *NOT* try to mess
 * with kernel_map here.  Hysteresis will be performed at malloc() time.
 */
static __inline SLZone *
check_zone_free(SLGlobalData *slgd, SLZone *z)
{
    SLZone *znext;

    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || LIST_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z) &&
	z->z_RCount == 0) {
	int *kup;

	znext = LIST_NEXT(z, z_Entry);
	LIST_REMOVE(z, z_Entry);

	z->z_Magic = -1;
	LIST_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
	++slgd->NFreeZones;
	kup = btokup(z);
	*kup = 0;
	z = znext;
    } else {
	z = LIST_NEXT(z, z_Entry);
    }
    return z;
}
#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */
static void
slab_record_source(SLZone *z, const char *file, int line)
{
    int i;
    int b = line & (SLAB_DEBUG_ENTRIES - 1);

    i = b;
    do {
	if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
	    return;
	if (z->z_Sources[i].file == NULL)
	    break;
	i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
    } while (i != b);
    z->z_Sources[i].file = file;
    z->z_Sources[i].line = line;
}
#endif
static __inline unsigned long
powerof2_size(unsigned long size)
{
    unsigned long i;

    if (size == 0 || powerof2(size))
	return size;

    i = flsl(size);
    return (1UL << i);
}
/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE
 *			- allow the freelist to be exhausted
 *	M_POWEROF2	- roundup size to the nearest power of 2
 *
 *	(a caller-side usage sketch follows the function body below)
 */
#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	      const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    unsigned long align;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }

    if (flags & M_POWEROF2)
	size = powerof2_size(size);

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;	/* not MP synchronized */
	if ((ssize_t)ttl < 0)		/* deal with occasional race */
	    ttl = 0;
	if (ttl >= type->ks_limit) {
	    if (flags & M_NULLOK) {
		logmemory(malloc_end, NULL, type, size, flags);
		return(NULL);
	    }
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
	logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     *
     * NOTE: ku_pagecnt must be cleared before we free the slab or we
     *	     might race another cpu allocating the kva and setting
     *	     ku_pagecnt.
     */
    while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
	    int *kup;

	    z = LIST_FIRST(&slgd->FreeZones);
	    LIST_REMOVE(z, z_Entry);
	    --slgd->NFreeZones;
	    kup = btokup(z);
	    *kup = 0;
	    crit_exit();
	    kmem_slab_free(z, ZoneSize);	/* may block */
	    atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
	} else {
	    crit_exit();
	}
    }

    /*
     * XXX handle oversized frees that were queued from kfree().
     */
    while (LIST_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if ((z = LIST_FIRST(&slgd->FreeOvZones)) != NULL) {
	    vm_size_t tsize;

	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    LIST_REMOVE(z, z_Entry);
	    tsize = z->z_ChunkSize;
	    crit_exit();
	    kmem_slab_free(z, tsize);	/* may block */
	    atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
	} else {
	    crit_exit();
	}
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on a SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
	int *kup;

	size = round_page(size);
	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
	if (chunk == NULL) {
	    logmemory(malloc_end, NULL, type, size, flags);
	    return(NULL);
	}
	atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;
	kup = btokup(chunk);
	*kup = size / PAGE_SIZE;
	crit_enter();
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size, &align);
    KKASSERT(zi < NZONES);
    crit_enter();

    if ((z = LIST_FIRST(&slgd->ZoneAry[zi])) != NULL) {
	/*
	 * Locate a chunk - we have to have at least one.  If this is the
	 * last chunk go ahead and do the work to retrieve chunks freed
	 * from remote cpus, and if the zone is still empty move it off
	 * the ZoneAry.
	 */
	if (--z->z_NFree <= 0) {
	    KKASSERT(z->z_NFree == 0);

	    /*
	     * WARNING! This code competes with other cpus.  It is ok
	     * for us to not drain RChunks here but we might as well, and
	     * it is ok if more accumulate after we're done.
	     *
	     * Set RSignal before pulling rchunks off, indicating that we
	     * will be moving ourselves off of the ZoneAry.  Remote ends will
	     * read RSignal before putting rchunks on thus interlocking
	     * their IPI signaling.
	     */
	    if (z->z_RChunks == NULL)
		atomic_swap_int(&z->z_RSignal, 1);

	    clean_zone_rchunks(z);

	    /*
	     * Remove from the zone list if no free chunks remain.
	     */
	    if (z->z_NFree == 0) {
		LIST_REMOVE(z, z_Entry);
	    }
	}

	/*
	 * Fast path, we have chunks available in z_LChunks.
	 */
	chunk = z->z_LChunks;
	if (chunk) {
	    chunk_mark_allocated(z, chunk);
	    z->z_LChunks = chunk->c_Next;
	    if (z->z_LChunks == NULL)
		z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
	    slab_record_source(z, file, line);
#endif
	    goto done;
	}

	/*
	 * No chunks are available in LChunks, the free chunk MUST be
	 * in the never-before-used memory area, controlled by UIndex.
	 *
	 * The consequences are very serious if our zone got corrupted so
	 * we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    ++z->z_UIndex;
	else
	    z->z_UIndex = 0;

	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");

	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
	chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
	slab_record_source(z, file, line);
#endif
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zerod pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
	int off;
	int *kup;

	if ((z = LIST_FIRST(&slgd->FreeZones)) != NULL) {
	    LIST_REMOVE(z, z_Entry);
	    --slgd->NFreeZones;
	    bzero(z, sizeof(SLZone));
	    z->z_Flags |= SLZF_UNOTZEROD;
	} else {
	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
	    if (z == NULL)
		goto fail;
	    atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
	}

	/*
	 * How big is the base structure?
	 */
#if defined(INVARIANTS)
	/*
	 * Make room for z_Bitmap.  An exact calculation is somewhat more
	 * complicated so don't make an exact calculation.
	 */
	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
	off = sizeof(SLZone);
#endif

	/*
	 * Guarentee power-of-2 alignment for power-of-2-sized chunks.
	 * Otherwise properly align the data according to the chunk size.
	 */
	if (powerof2(size))
	    align = size;
	off = roundup2(off, align);

	z->z_Magic = ZALLOC_SLAB_MAGIC;
	z->z_ZoneIndex = zi;
	z->z_NMax = (ZoneSize - off) / size;
	z->z_NFree = z->z_NMax - 1;
	z->z_BasePtr = (char *)z + off;
	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
	z->z_ChunkSize = size;
	z->z_CpuGd = gd;
	z->z_Cpu = gd->gd_cpuid;
	z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
	bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
	bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	LIST_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;	/* already zero'd */
	    flags |= M_PASSIVE_ZERO;
	}
	kup = btokup(z);
	*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
	chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
	slab_record_source(z, file, line);
#endif

	/*
	 * Slide the base index for initial allocations out of the next
	 * zone we create so we do not over-weight the lower part of the
	 * cpu memory caches.
	 */
	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
			& (ZALLOC_MAX_ZONE_SIZE - 1);
    }

done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();

    if (flags & M_ZERO)
	bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
	if (use_malloc_pattern) {
	    for (i = 0; i < size; i += sizeof(int)) {
		*(int *)((char *)chunk + i) = -1;
	    }
	}
	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
    }
#endif
    logmemory(malloc_end, chunk, type, size, flags);
    return(chunk);

fail:
    crit_exit();
    logmemory(malloc_end, NULL, type, size, flags);
    return(NULL);
}
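/*
 * Usage sketch for the flag combinations documented above (compiled out).
 * The malloc types used here (M_DEVBUF, M_TEMP) are the buckets defined in
 * this file; the scenario itself is an illustrative assumption, not taken
 * from any real driver.
 */
#if 0
static void *
kmalloc_usage_example(size_t len)
{
    void *buf;
    void *opt;

    /* May block until memory is available; returned memory is zeroed */
    buf = kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);

    /* Non-blocking attempt; caller must tolerate a NULL return */
    opt = kmalloc(len, M_TEMP, M_RNOWAIT | M_NULLOK);
    if (opt != NULL)
	kfree(opt, M_TEMP);

    return buf;		/* eventually released with kfree(buf, M_DEVBUF) */
}
#endif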
/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
	       struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
    unsigned long osize;
    unsigned long align;
    SLZone *z;
    void *nptr;
    int *kup;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(kmalloc_debug(size, type, flags, file, line));
    if (size == 0) {
	kfree(ptr, type);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
	osize = *kup << PAGE_SHIFT;
	if (osize == round_page(size))
	    return(ptr);
	if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
	    return(NULL);
	bcopy(ptr, nptr, min(size, osize));
	kfree(ptr, type);
	return(nptr);
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
	zoneindex(&size, &align);
	if (z->z_ChunkSize == size)
	    return(ptr);
    }
    if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}
/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    return(type->ks_limit);
}
/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
	      const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
	return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
    bcopy(str, nstr, zlen);
    return(nstr);
}
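/*
 * kstrdup() usage sketch (compiled out): the copy is allocated with
 * M_WAITOK against the caller's malloc type and must eventually be
 * released with kfree() against the same type.
 */
#if 0
static void
kstrdup_example(void)
{
    char *copy;

    copy = kstrdup("example string", M_TEMP);
    kfree(copy, M_TEMP);
}
#endif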
/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static void
kfree_remote(void *ptr)
{
    SLGlobalData *slgd;
    SLZone *z;
    int nfree;
    int *kup;

    slgd = &mycpu->gd_slab;
    z = ptr;
    kup = btokup(z);
    KKASSERT(*kup == -((int)mycpuid + 1));
    KKASSERT(z->z_RCount > 0);
    atomic_subtract_int(&z->z_RCount, 1);

    logmemory(free_rem_beg, z, NULL, 0L, 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
    KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
    nfree = z->z_NFree;

    /*
     * Indicate that we will no longer be off of the ZoneAry by
     * clearing RSignal.
     */
    if (z->z_RChunks)
	z->z_RSignal = 0;

    /*
     * Atomically extract the bchunks list and then process it back
     * into the lchunks list.  We want to append our bchunks to the
     * lchunks list and not prepend since we likely do not have
     * cache mastership of the related data (not that it helps since
     * we are using c_Next).
     */
    clean_zone_rchunks(z);
    if (z->z_NFree && nfree == 0) {
	LIST_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     *
     * Do not move the zone if there is an IPI inflight, otherwise MP
     * races can result in our free_remote code accessing a destroyed
     * zone.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || LIST_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z) &&
	z->z_RCount == 0) {
	LIST_REMOVE(z, z_Entry);
	z->z_Magic = -1;
	LIST_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
	++slgd->NFreeZones;
	kup = btokup(z);
	*kup = 0;
    }
    logmemory(free_rem_end, z, NULL, 0L, 0);
}
/*
 * kfree() (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLChunk *bchunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    unsigned long size;
    int *kup;
    int rsignal;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
	panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
	logmemory(free_zero, ptr, type, -1UL, 0);
	logmemory_quick(free_end);
	return;
    }

    /*
     * Panic on bad malloc type
     */
    if (type->ks_magic != M_MAGIC)
	panic("free: malloc type lacks magic");

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
	size = *kup << PAGE_SHIFT;
	*kup = 0;
#ifdef INVARIANTS
	KKASSERT(sizeof(weirdary) <= size);
	bcopy(weirdary, ptr, sizeof(weirdary));
#endif
	/*
	 * NOTE: For oversized allocations we do not record the
	 *	 originating cpu.  It gets freed on the cpu calling
	 *	 kfree().  The statistics are in aggregate.
	 *
	 * note: XXX we have still inherited the interrupts-can't-block
	 * assumption.  An interrupt thread does not bump
	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	 * primarily until we can fix softupdate's assumptions about free().
	 */
	crit_enter();
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= size;
	if (mycpu->gd_intr_nesting_level ||
	    (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
	    logmemory(free_ovsz_delayed, ptr, type, size, 0);
	    z = (SLZone *)ptr;
	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
	    z->z_ChunkSize = size;

	    LIST_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
	    crit_exit();
	} else {
	    crit_exit();
	    logmemory(free_ovsz, ptr, type, size, 0);
	    kmem_slab_free(ptr, size);	/* may block */
	    atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
	}
	logmemory_quick(free_end);
	return;
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then use atomic ops to free to the
     * remote cpu linked list and notify the target zone using a
     * passive IPI.
     *
     * The target zone cannot be deallocated while we own a chunk of it,
     * so the zone header's storage is stable until the very moment
     * we adjust z_RChunks.  After that we cannot safely dereference (z).
     *
     * (no critical section needed)
     */
    if (z->z_CpuGd != gd) {
	/*
	 * Making these adjustments now allow us to avoid passing (type)
	 * to the remote cpu.  Note that ks_inuse/ks_memuse is being
	 * adjusted on OUR cpu, not the zone cpu, but it should all still
	 * sum up properly and cancel out.
	 */
	crit_enter();
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
	crit_exit();

	/*
	 * WARNING! This code competes with other cpus.  Once we
	 *	    successfully link the chunk to RChunks the remote
	 *	    cpu can rip z's storage out from under us.
	 *
	 *	    Bumping RCount prevents z's storage from getting
	 *	    ripped out.
	 */
	rsignal = z->z_RSignal;
	cpu_lfence();
	if (rsignal)
	    atomic_add_int(&z->z_RCount, 1);

	chunk = ptr;
	for (;;) {
	    bchunk = z->z_RChunks;
	    cpu_ccfence();
	    chunk->c_Next = bchunk;
	    cpu_sfence();

	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
		break;
	}

	/*
	 * We have to signal the remote cpu if our actions will cause
	 * the remote zone to be placed back on ZoneAry so it can
	 * move the zone back on.
	 *
	 * We only need to deal with NULL->non-NULL RChunk transitions
	 * and only if z_RSignal is set.  We interlock by reading rsignal
	 * before adding our chunk to RChunks.  This should result in
	 * virtually no IPI traffic.
	 *
	 * We can use a passive IPI to reduce overhead even further.
	 */
	if (bchunk == NULL && rsignal) {
	    logmemory(free_request, ptr, type,
		      (unsigned long)z->z_ChunkSize, 0);
	    lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
	    /* z can get ripped out from under us from this point on */
	} else if (rsignal) {
	    atomic_subtract_int(&z->z_RCount, 1);
	    /* z can get ripped out from under us from this point on */
	}
	logmemory_quick(free_end);
	return;
    }

    /*
     * kfree locally
     */
    logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

    crit_enter();
    chunk = ptr;
    chunk_mark_free(z, chunk);

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse.  Add
     * to the front of the linked list so it is more likely to be
     * reallocated, since it is already in our L1 cache.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
	panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_LChunks;
    z->z_LChunks = chunk;
    if (chunk->c_Next == NULL)
	z->z_LChunksp = &chunk->c_Next;
#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
	panic("BADFREE2");
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	LIST_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    check_zone_free(slgd, z);
    logmemory_quick(free_end);
    crit_exit();
}
/*
 * Cleanup slabs which are hanging around due to RChunks.  Called once every
 * 10 seconds on all cpus.
 */
void
slab_cleanup(void)
{
    SLGlobalData *slgd = &mycpu->gd_slab;
    SLZone *z;
    int i;

    crit_enter();
    for (i = 0; i < NZONES; ++i) {
	if ((z = LIST_FIRST(&slgd->ZoneAry[i])) == NULL)
	    continue;
	z = LIST_NEXT(z, z_Entry);

	/*
	 * Scan zones starting with the second zone in each list.
	 */
	while (z) {
	    /*
	     * Shift all RChunks to the end of the LChunks list.  This is
	     * an O(1) operation.
	     *
	     * Then free the zone if possible.
	     */
	    clean_zone_rchunks(z);
	    z = check_zone_free(slgd, z);
	}
    }
    crit_exit();
}
#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
	    ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
	    ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif
/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 *
 *	(a sketch of the alloc/free pairing follows kmem_slab_free() below)
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    vm_page_t mbase = NULL;
    vm_page_t m;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
	vm_map_unlock(&kernel_map);
	if ((flags & M_NULLOK) == 0)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	vm_map_entry_release(count);
	crit_exit();
	return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_hold(&kernel_object);
    vm_object_reference_locked(&kernel_object);
    vm_map_insert(&kernel_map, &count,
		  &kernel_object, NULL,
		  addr, addr, addr + size,
		  VM_MAPTYPE_NORMAL,
		  VM_PROT_ALL, VM_PROT_ALL,
		  0);
    vm_object_drop(&kernel_object);
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    vm_map_unlock(&kernel_map);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
	base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
	base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
	base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
	panic("kmem_slab_alloc: bad flags %08x (%p)",
	      flags, ((int **)&size)[-1]);
    }

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag or map
     * them yet.  VM_ALLOC_NORMAL can only be set if we are not preempting.
     *
     * VM_ALLOC_SYSTEM is automatically set if we are preempting and
     * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
     * implied in this case), though I'm not sure if we really need to
     * do that.
     */
    vmflags = base_vmflags;
    if (flags & M_WAITOK) {
	if (td->td_preempted)
	    vmflags |= VM_ALLOC_SYSTEM;
	else
	    vmflags |= VM_ALLOC_NORMAL;
    }

    vm_object_hold(&kernel_object);
    for (i = 0; i < size; i += PAGE_SIZE) {
	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
	if (i == 0)
	    mbase = m;

	/*
	 * If the allocation failed we either return NULL or we retry.
	 *
	 * If M_WAITOK is specified we wait for more memory and retry.
	 * If M_WAITOK is specified from a preemption we yield instead of
	 * wait.  Livelock will not occur because the interrupt thread
	 * will not be preempting anyone the second time around after the
	 * yield.
	 */
	if (m == NULL) {
	    if (flags & M_WAITOK) {
		if (td->td_preempted) {
		    lwkt_switch();
		} else {
		    vm_wait(0);
		}
		i -= PAGE_SIZE;	/* retry */
		continue;
	    }
	    break;
	}
    }

    /*
     * Check and deal with an allocation failure
     */
    if (i != size) {
	while (i != 0) {
	    i -= PAGE_SIZE;
	    m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
	    /* page should already be busy */
	    vm_page_free(m);
	}
	vm_map_lock(&kernel_map);
	vm_map_delete(&kernel_map, addr, addr + size, &count);
	vm_map_unlock(&kernel_map);
	vm_object_drop(&kernel_object);

	vm_map_entry_release(count);
	crit_exit();
	return(NULL);
    }

    /*
     * Success!
     *
     * NOTE: The VM pages are still busied.  mbase points to the first one
     *	     but we have to iterate via vm_page_next()
     */
    vm_object_drop(&kernel_object);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    m = mbase;
    i = 0;

    while (i < size) {
	/*
	 * page should already be busy
	 */
	m->valid = VM_PAGE_BITS_ALL;
	vm_page_wire(m);
	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC,
		   1, NULL);
	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
	    bzero((char *)addr + i, PAGE_SIZE);
	vm_page_flag_clear(m, PG_ZERO);
	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
	vm_page_flag_set(m, PG_REFERENCED);
	vm_page_wakeup(m);

	i += PAGE_SIZE;
	vm_object_hold(&kernel_object);
	m = vm_page_next(m);
	vm_object_drop(&kernel_object);
    }

    /*
     * Cleanup.
     */
    vm_map_entry_release(count);
    atomic_add_long(&SlabsAllocated, 1);
    return((void *)addr);
}
/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    atomic_add_long(&SlabsFreed, 1);
    crit_exit();
}
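/*
 * Sketch of the backend pairing (compiled out): kmem_slab_alloc() and
 * kmem_slab_free() are used internally for zone headers and oversized
 * allocations, e.g. the ZoneSize-aligned zone allocation performed in
 * kmalloc() above.  This is illustration only, not an additional caller.
 */
#if 0
static void
kmem_slab_example(void)
{
    void *z;

    /* Zone-sized, zone-aligned, zeroed allocation as kmalloc() does it */
    z = kmem_slab_alloc(ZoneSize, ZoneSize, M_WAITOK | M_ZERO);
    if (z != NULL)
	kmem_slab_free(z, ZoneSize);
}
#endif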
void *
kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
		   int flags)
{
#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

    void *ret;

    if (size_alloc < __VM_CACHELINE_SIZE)
	size_alloc = __VM_CACHELINE_SIZE;
    else if (!CAN_CACHEALIGN(size_alloc))
	flags |= M_POWEROF2;

    ret = kmalloc(size_alloc, type, flags);
    KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
	    ("%p(%lu) not cacheline %d aligned",
	     ret, size_alloc, __VM_CACHELINE_SIZE));
    return ret;
}
#undef CAN_CACHEALIGN
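/*
 * kmalloc_cachealign() usage sketch (compiled out).  Assuming a 64-byte
 * cacheline, a 300-byte request is below CAN_CACHEALIGN() and therefore
 * gets M_POWEROF2 applied (rounding it to 512); requests of at least 512
 * bytes land in zones whose chunking is already at least a cacheline, so
 * no rounding is needed.  The size used here is an illustrative assumption.
 */
#if 0
static void
kmalloc_cachealign_example(void)
{
    void *p;

    p = kmalloc_cachealign(300, M_TEMP, M_WAITOK);
    /* (uintptr_t)p & (__VM_CACHELINE_SIZE - 1) == 0 */
    kfree(p, M_TEMP);
}
#endif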