 * KERN_SLABALLOC.C - Kernel SLAB memory allocator (MP SAFE)
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.31 2005/04/26 00:47:59 dillon Exp $
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the zone size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
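
/*
 * Illustrative usage sketch (not part of the original source): the
 * compatibility points above permit caller patterns such as the
 * following, where M_TEMP and the M_* flags are the ones defined
 * elsewhere in this file and in the malloc headers:
 *
 *	p = malloc(0, M_TEMP, M_WAITOK);	-- returns ZERO_LENGTH_PTR, not NULL
 *	p = realloc(p, 128, M_TEMP, M_WAITOK);	-- later grown by the caller
 *	q = malloc(512, M_TEMP, M_WAITOK);	-- power-of-2 size, 512-byte aligned
 */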
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
 * Fixed globals (not per-cpu)

static int ZoneLimit;
static int ZonePageCount;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.

#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * it has been freed.

#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)
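
/*
 * Illustrative note (not part of the original source): assuming 4K pages,
 * PAGE_MASK is 0xfff and MIN_CHUNK_MASK is 0x7, so IN_SAME_PAGE_MASK is
 * ~0xfff | 0x7 == 0xfffff007 on a 32-bit intptr_t.  Because chunks are at
 * least 8-byte aligned, the low three bits of any chunk address are zero,
 * so comparing (addr & IN_SAME_PAGE_MASK) for two chunks effectively
 * compares their page numbers.  E.g. 0xc25f3a40 and 0xc25f3ff8 both yield
 * 0xc25f3000, while 0xc25f4008 yields 0xc25f4000.
 */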
 * Misc global malloc buckets

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).

static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)
kmeminit(void *dummy)
    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
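
/*
 * Worked example (illustrative, not part of the original source), assuming
 * the typical bounds mentioned above (32K min / 128K max zone size, 16K
 * ZALLOC_ZONE_LIMIT) and 4K pages: on a 512MB machine limsize is 512MB,
 * usesize is 524288 (KB), and ZoneSize doubles from 32K up to the 128K
 * ceiling.  ZoneLimit is then ZoneSize/4 = 32K, clamped to 16K, and
 * ZonePageCount is 128K/4K = 32 pages per zone.
 */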
 * Initialize a malloc type tracking structure.

malloc_init(void *data)
    struct malloc_type *type = data;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
malloc_uninit(void *data)
    struct malloc_type *type = data;
    struct malloc_type *t;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    printf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
        ttl, type->ks_shortdesc, i);

    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
        if (t->ks_next == type) {
            t->ks_next = type->ks_next;
    type->ks_next = NULL;
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.

zoneindex(unsigned long *bytes)
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
        *bytes = n = (n + 15) & ~15;
        *bytes = n = (n + 31) & ~31;
        *bytes = n = (n + 63) & ~63;
        *bytes = n = (n + 127) & ~127;
        return(n / 128 + 31);
        *bytes = n = (n + 255) & ~255;
        return(n / 256 + 39);
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
#if ZALLOC_ZONE_LIMIT > 8192
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
#if ZALLOC_ZONE_LIMIT > 16384
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    panic("Unexpected byte count %d", n);
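
/*
 * Worked examples (illustrative, not part of the original source), using
 * only the cases shown above and the chunking table near the top of this
 * file: a 100 byte request is rounded to 104 and maps to zone index
 * 104/8 - 1 == 12; a 1500 byte request is rounded to 1536 and maps to
 * index 1536/128 + 31 == 43; a 5000 byte request is rounded to 5120 and
 * maps to index 5120/512 + 47 == 57.  The caller's size is updated to the
 * rounded chunk size in each case.
 */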
 * malloc()	(SLAB ALLOCATOR) (MP SAFE)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
malloc(unsigned long size, struct malloc_type *type, int flags)
    struct globaldata *gd;

     * XXX silly to have this in the critical path.
    if (type->ks_limit == 0) {
        if (type->ks_limit == 0)

     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
    while (type->ks_loosememuse >= type->ks_limit) {
        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;	/* not MP synchronized */
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK)
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
        return(ZERO_LENGTH_PTR);
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
            slgd->FreeZones = z->z_Next;
            kmem_slab_free(z, ZoneSize);	/* may block */

     * XXX handle oversized frees that were queued from free().
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        if ((z = slgd->FreeOvZones) != NULL) {
            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            kmem_slab_free(z, z->z_ChunkSize);	/* may block */
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup->ku_pagecnt = size / PAGE_SIZE;
        kup->ku_cpu = gd->gd_cpuid;
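
/*
 * Illustrative example (not part of the original source): the oversized
 * path records the allocation size in whole pages so free() can recover
 * it without being told the size.  Assuming PAGE_SIZE is 4K (PAGE_SHIFT
 * 12), a 20000 byte request is rounded by round_page() to 20480 bytes
 * (5 pages), ku_pagecnt is set to 5, and free() later reconstructs the
 * size as 5 << 12 == 20480.
 */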
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
                 * Diagnostic: c_Next is not total garbage.
                KKASSERT(chunk->c_Next == NULL ||
                    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
                if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags |= M_PASSIVE_ZERO;
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zero'd pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
    if ((z = slgd->FreeZones) != NULL) {
        slgd->FreeZones = z->z_Next;
        bzero(z, sizeof(SLZone));
        z->z_Flags |= SLZF_UNOTZEROD;
        z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
     * Guarantee power-of-2 alignment for power-of-2-sized chunks.
     * Otherwise just 8-byte align the data.
    if ((size | (size - 1)) + 1 == (size << 1))
        off = (sizeof(SLZone) + size - 1) & ~(size - 1);
    else
        off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
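
/*
 * Worked example (illustrative, not part of the original source): the
 * test above recognizes powers of 2 because for size == 2^k,
 * (size | (size - 1)) sets all bits below bit k, so adding 1 yields
 * exactly size << 1.  For size = 64: (64 | 63) + 1 == 128 == 64 << 1,
 * so the chunk offset is rounded up to a 64 byte boundary.  For a
 * non-power-of-2 such as size = 96: (96 | 95) + 1 == 128 while
 * 96 << 1 == 192, so the data is simply 8-byte aligned.
 */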
    z->z_Magic = ZALLOC_SLAB_MAGIC;
    z->z_NMax = (ZoneSize - off) / size;
    z->z_NFree = z->z_NMax - 1;
    z->z_BasePtr = (char *)z + off;
    z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
    z->z_ChunkSize = size;
    z->z_FirstFreePg = ZonePageCount;
    z->z_Cpu = gd->gd_cpuid;
    chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
    z->z_Next = slgd->ZoneAry[zi];
    slgd->ZoneAry[zi] = z;
    if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
        flags &= ~M_ZERO;	/* already zero'd */
        flags |= M_PASSIVE_ZERO;

     * Slide the base index for initial allocations out of the next
     * zone we create so we do not over-weight the lower part of the
    slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) &
        (ZALLOC_MAX_ZONE_SIZE - 1);

    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.

realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(malloc(size, type, flags));

     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
        struct kmemusage *kup;

        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
            if ((nptr = malloc(size, type, flags)) == NULL)
            bcopy(ptr, nptr, min(size, osize));

     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    if (z->z_ChunkSize == size)

     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
    if ((nptr = malloc(size, type, flags)) == NULL)
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
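
/*
 * Illustrative note (not part of the original source): recovering the
 * zone header by masking works because zones are both ZoneSize bytes
 * long and ZoneSize aligned (kmem_slab_alloc(ZoneSize, ZoneSize, ...)),
 * so the header always sits at the start of the aligned region that
 * contains the chunk.  For example, assuming a 128K zone (ZoneMask ==
 * 0x1ffff), a chunk at 0xc25f3a40 maps to the zone header at
 * 0xc25f3a40 & ~0x1ffff == 0xc25e0000.
 */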
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)

strdup(const char *str, struct malloc_type *type)
    int zlen;	/* length inclusive of terminating NUL */

    zlen = strlen(str) + 1;
    nstr = malloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);

 * free()	(SLAB ALLOCATOR)
 *
 *	Free the specified chunk of memory.

free_remote(void *ptr)
    free(ptr, *(struct malloc_type **)ptr);
 * free (SLAB ALLOCATOR) (MP SAFE)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.

free(void *ptr, struct malloc_type *type)
    struct globaldata *gd;

        panic("trying to free NULL pointer");

     * Handle special 0-byte allocations
    if (ptr == ZERO_LENGTH_PTR)
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
        struct kmemusage *kup;

        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));

             * note: we always adjust our cpu's slot, not the originating
             * cpu (kup->ku_cpuid).  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level ||
                (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
            kmem_slab_free(ptr, size);	/* may block */
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * IPI message is used.
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
        lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);

    panic("Corrupt SLZone");
    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;

     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
                panic("Double free at %p", chunk);
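
/*
 * Worked example of the heuristic above (illustrative, not part of the
 * original source): with 4K pages, a chunk at zone offset 0x5a40 has
 * pgno == 0x5a40 >> 12 == 5.  If the first word of the block happens to
 * look like a pointer into that same page, the page's free list is
 * scanned for the chunk; finding the chunk already on that list means a
 * double free.  Non-zeroed allocations are seeded with
 * c_Next = (void *)-1 in malloc() above precisely so that live blocks do
 * not trigger this scan by accident.
 */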
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
     * Add this free non-zero'd chunk to a linked list for reuse, and adjust
     * z_FirstFreePg if necessary.
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE %p", chunk);
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
        z->z_Next = slgd->FreeZones;
 * kmem_slab_alloc()	(MP SAFE) (GETS BGL)
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 *
 *	This routine will currently obtain the BGL.

kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
    int count, vmflags, base_vmflags;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
    if (flags & M_RNOWAIT) {
        if (try_mplock() == 0)
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        vm_map_entry_release(count);

    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
        kernel_object, offset, addr, addr + size,
        VM_PROT_ALL, VM_PROT_ALL, 0);

        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
        panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_pindex_t idx = OFF_TO_IDX(offset + i);

         * VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
        vmflags = base_vmflags;
        if (flags & M_WAITOK) {
            if (td->td_preempted)
                vmflags |= VM_ALLOC_SYSTEM;
            else
                vmflags |= VM_ALLOC_NORMAL;

        m = vm_page_alloc(kernel_object, idx, vmflags);
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
        if (flags & M_WAITOK) {
            if (td->td_preempted) {
            i -= PAGE_SIZE;	/* retry */

         * We were unable to recover, clean up and return NULL
        m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
        vm_map_delete(map, addr, addr + size, &count);
        vm_map_entry_release(count);
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
    vm_map_set_wired_quick(map, addr, size, &count);

     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
    for (i = 0; i < size; i += PAGE_SIZE) {
        m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
        m->valid = VM_PAGE_BITS_ALL;
        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);

    vm_map_entry_release(count);
    return((void *)addr);
 * kmem_slab_free()	(MP SAFE) (GETS BGL)

kmem_slab_free(void *ptr, vm_size_t size)
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);