/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
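
/*
 * Example usage (illustrative sketch, not code from this file): callers
 * pair kmalloc() with kfree() on the same malloc_type, using the M_*
 * flags documented in kmalloc() below, e.g.
 *
 *	p = kmalloc(sizeof(*p), M_TEMP, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, M_TEMP);
 *
 * A zero-length request is legal and returns a distinct non-NULL token
 * (see ZERO_LENGTH_PTR below).
 */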

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) +	\
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)
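
/*
 * Example of the sentinels above (illustrative): freed chunks are
 * overwritten with the weirdary[] pattern (WEIRD_ADDR, 0xdeadc0de) so a
 * use-after-free tends to fault or fail deterministically, while
 * kmalloc(0) hands back ZERO_LENGTH_PTR, which kfree() recognizes and
 * treats as a no-op.
 */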

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	&use_malloc_pattern, 0, "");

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_offset_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = KvaSize / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
				PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
    int i;
    long ttl;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");

    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
	    ttl, type->ks_shortdesc, i);
    }

    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
	malloc_init(type);
    if (type->ks_limit < bytes)
	type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already assigned.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
	type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
	type->ks_magic = M_MAGIC;
	type->ks_shortdesc = descr;
	malloc_init(type);
	*typep = type;
    }
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
	malloc_uninit(*typep);
	kfree(*typep, M_TEMP);
	*typep = NULL;
    }
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */

    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
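
/*
 * Worked example for zoneindex() (illustrative): a request for 100 bytes
 * is rounded up to the 104-byte chunk size and maps to zone index
 * 104/8 - 1 = 12, while a request for 600 bytes is rounded up to 640
 * and maps to zone index 640/64 + 23 = 33.
 */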

/*
 * malloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 */
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
    int off;
    int i;

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;	/* not MP synchronized */
	if (ttl >= type->ks_limit) {
	    if (flags & M_NULLOK) {
		logmemory(malloc, NULL, type, size, flags);
		return(NULL);
	    }
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
	logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    kmem_slab_free(z, ZoneSize);	/* may block */
	}
	crit_exit();
    }

    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    kmem_slab_free(z, z->z_ChunkSize);	/* may block */
	}
	crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on a SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
	struct kmemusage *kup;

	size = round_page(size);
	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
	if (chunk == NULL) {
	    logmemory(malloc, NULL, type, size, flags);
	    return(NULL);
	}
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;
	kup = btokup(chunk);
	kup->ku_pagecnt = size / PAGE_SIZE;
	kup->ku_cpu = gd->gd_cpuid;
	crit_enter();
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
	KKASSERT(z->z_NFree > 0);

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (--z->z_NFree == 0)
	    slgd->ZoneAry[zi] = z->z_Next;

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
		/*
		 * Diagnostic: c_Next is not total garbage.
		 */
		KKASSERT(chunk->c_Next == NULL ||
			((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			((intptr_t)chunk & IN_SAME_PAGE_MASK));
#ifdef INVARIANTS
		if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
		if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
		chunk_mark_allocated(z, chunk);
#endif
		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
		goto done;
	    }
	    ++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory, so
	 * it must be available in the never-before-used-memory area
	 * governed by UIndex.  The consequences are very serious if our zone
	 * got corrupted so we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    z->z_UIndex = z->z_UIndex + 1;
	else
	    z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND), expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    if ((z = slgd->FreeZones) != NULL) {
	slgd->FreeZones = z->z_Next;
	--slgd->NFreeZones;
	bzero(z, sizeof(SLZone));
	z->z_Flags |= SLZF_UNOTZEROD;
    } else {
	z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
	if (z == NULL)
	    goto fail;
    }

    /*
     * How big is the base structure?
     */
#if defined(INVARIANTS)
    /*
     * Make room for z_Bitmap.  An exact calculation is somewhat more
     * complicated so don't make an exact calculation.
     */
    off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
    bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
    off = sizeof(SLZone);
#endif

    /*
     * Guarantee power-of-2 alignment for power-of-2-sized chunks.
     * Otherwise just 8-byte align the data.
     */
    if ((size | (size - 1)) + 1 == (size << 1))
	off = (off + size - 1) & ~(size - 1);
    else
	off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
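
    /*
     * Illustrative arithmetic for the power-of-2 test above: for
     * size = 256, (256 | 255) + 1 == 512 == (256 << 1), so the chunk
     * base offset is rounded up to a 256-byte boundary; for size = 96,
     * (96 | 95) + 1 == 128 != 192, so only MIN_CHUNK_SIZE (8-byte)
     * alignment is applied.
     */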
    z->z_Magic = ZALLOC_SLAB_MAGIC;
    z->z_ZoneIndex = zi;
    z->z_NMax = (ZoneSize - off) / size;
    z->z_NFree = z->z_NMax - 1;
    z->z_BasePtr = (char *)z + off;
    z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
    z->z_ChunkSize = size;
    z->z_FirstFreePg = ZonePageCount;
    z->z_CpuGd = gd;
    z->z_Cpu = gd->gd_cpuid;
    chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
    z->z_Next = slgd->ZoneAry[zi];
    slgd->ZoneAry[zi] = z;
    if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	flags &= ~M_ZERO;	/* already zero'd */
	flags |= M_PASSIVE_ZERO;
    }
#if defined(INVARIANTS)
    chunk_mark_allocated(z, chunk);
#endif

    /*
     * Slide the base index for initial allocations out of the next
     * zone we create so we do not over-weight the lower part of the
     * cpu memory caches.
     */
    slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
			& (ZALLOC_MAX_ZONE_SIZE - 1);

done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();

    if (flags & M_ZERO)
	bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
	if (use_malloc_pattern) {
	    for (i = 0; i < size; i += sizeof(int)) {
		*(int *)((char *)chunk + i) = -1;
	    }
	}
	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
    }
#endif
    logmemory(malloc, chunk, type, size, flags);
    return(chunk);

fail:
    crit_exit();
    logmemory(malloc, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(kmalloc(size, type, flags));

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
	struct kmemusage *kup;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    osize = kup->ku_pagecnt << PAGE_SHIFT;
	    if (osize == round_page(size))
		return(ptr);
	    if ((nptr = kmalloc(size, type, flags)) == NULL)
		return(NULL);
	    bcopy(ptr, nptr, min(size, osize));
	    kfree(ptr, type);
	    return(nptr);
	}
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
	zoneindex(&size);
	if (z->z_ChunkSize == size)
	    return(ptr);
    }

    if ((nptr = kmalloc(size, type, flags)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}
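
/*
 * Behavioral example for krealloc() (illustrative): an original 100-byte
 * request lives in a 104-byte chunk, so resizing it to any value that
 * also rounds to 104 bytes returns the original pointer unchanged,
 * whereas resizing it to 112 bytes selects a different chunk size and a
 * new block is allocated, the data copied, and the old block freed.
 */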

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
	return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free()	(SLAB ALLOCATOR)
 *
 *	Free the specified chunk of memory.
 */
static void
free_remote(void *ptr)
{
    logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
    kfree(ptr, *(struct malloc_type **)ptr);
}
#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
	panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
	logmemory(free_zero, ptr, type, -1, 0);
	logmemory_quick(free_end);
	return;
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
	struct kmemusage *kup;
	unsigned long size;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    size = kup->ku_pagecnt << PAGE_SHIFT;
	    kup->ku_pagecnt = 0;
	    KKASSERT(sizeof(weirdary) <= size);
	    bcopy(weirdary, ptr, sizeof(weirdary));

	    /*
	     * note: we always adjust our cpu's slot, not the originating
	     * cpu (kup->ku_cpuid).  The statistics are in aggregate.
	     *
	     * note: XXX we have still inherited the interrupts-can't-block
	     * assumption.  An interrupt thread does not bump
	     * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	     * primarily until we can fix softupdate's assumptions about free().
	     */
	    crit_enter();
	    --type->ks_inuse[gd->gd_cpuid];
	    type->ks_memuse[gd->gd_cpuid] -= size;
	    if (mycpu->gd_intr_nesting_level ||
		(gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
		logmemory(free_ovsz_delayed, ptr, type, size, 0);
		z = (SLZone *)ptr;
		z->z_Magic = ZALLOC_OVSZ_MAGIC;
		z->z_Next = slgd->FreeOvZones;
		z->z_ChunkSize = size;
		slgd->FreeOvZones = z;
		crit_exit();
	    } else {
		crit_exit();
		logmemory(free_ovsz, ptr, type, size, 0);
		kmem_slab_free(ptr, size);	/* may block */
	    }
	    logmemory_quick(free_end);
	    return;
	}
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
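
    /*
     * Example of the masking above (illustrative): with a 128KB ZoneSize,
     * ZoneMask is 0x1ffff, so a chunk at kernel address
     * 0xffffffff8012a340 masks down to 0xffffffff80120000, the start of
     * its zone, where the SLZone header (and z_Magic) lives.
     */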

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * message is sent.
     */
    if (z->z_CpuGd != gd) {
	*(struct malloc_type **)ptr = type;
#ifdef SMP
	logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
	lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
#else
	panic("Corrupt SLZone");
#endif
	logmemory_quick(free_end);
	return;
    }

    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    if (type->ks_magic != M_MAGIC)
	panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
	SLChunk *scan;

	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
	    if (scan == chunk)
		panic("Double free at %p", chunk);
	}
    }
    chunk_mark_free(z, chunk);
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
	panic("BADFREE %p", chunk);
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
	panic("BADFREE2");
    if (z->z_FirstFreePg > pgno)
	z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
	SLZone **pz;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
	    ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
	    ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif
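
/*
 * Worked example for the bitmap indexing above (illustrative): a chunk
 * whose offset from z_BasePtr is 70 chunk sizes has bitdex = 70, so
 * bitptr selects z_Bitmap[70 >> 5] = z_Bitmap[2] and the bit tested and
 * set/cleared is 1 << (70 & 31) = 1 << 6 within that word.
 */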

/*
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 *
 *	This routine will currently obtain the BGL.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    thread_t td = curthread;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * are not allowed to block.
     */
    if (flags & M_RNOWAIT) {
	if (try_mplock() == 0)
	    return(NULL);
    } else {
	get_mplock();
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
	vm_map_unlock(&kernel_map);
	if ((flags & M_NULLOK) == 0)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	vm_map_entry_release(count);
	rel_mplock();
	return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_reference(&kernel_object);
    vm_map_insert(&kernel_map, &count,
		  &kernel_object, addr, addr, addr + size,
		  VM_MAPTYPE_NORMAL,
		  VM_PROT_ALL, VM_PROT_ALL,
		  0);

    base_vmflags = 0;
    if (flags & M_ZERO)
	base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
	base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
	base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
	panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	/*
	 * VM_ALLOC_NORMAL can only be set if we are not preempting.
	 *
	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
	 * implied in this case), though I'm not sure if we really need to
	 * do that.
	 */
	vmflags = base_vmflags;
	if (flags & M_WAITOK) {
	    if (td->td_preempted)
		vmflags |= VM_ALLOC_SYSTEM;
	    else
		vmflags |= VM_ALLOC_NORMAL;
	}

	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);

	/*
	 * If the allocation failed we either return NULL or we retry.
	 *
	 * If M_WAITOK is specified we wait for more memory and retry.
	 * If M_WAITOK is specified from a preemption we yield instead of
	 * wait.  Livelock will not occur because the interrupt thread
	 * will not be preempting anyone the second time around after the
	 * yield.
	 */
	if (m == NULL) {
	    if (flags & M_WAITOK) {
		if (td->td_preempted) {
		    vm_map_unlock(&kernel_map);
		    lwkt_yield();
		    vm_map_lock(&kernel_map);
		} else {
		    vm_map_unlock(&kernel_map);
		    vm_wait(0);
		    vm_map_lock(&kernel_map);
		}
		i -= PAGE_SIZE;	/* retry */
		continue;
	    }

	    /*
	     * We were unable to recover, cleanup and return NULL
	     */
	    while (i != 0) {
		i -= PAGE_SIZE;
		m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
		/* page should already be busy */
		vm_page_free(m);
	    }
	    vm_map_delete(&kernel_map, addr, addr + size, &count);
	    vm_map_unlock(&kernel_map);
	    vm_map_entry_release(count);
	    rel_mplock();
	    return(NULL);
	}
    }

    /*
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     *
     * The pages were busied by the allocations above.
     */
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
	m->valid = VM_PAGE_BITS_ALL;
	/* page should already be busy */
	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
	    bzero((char *)addr + i, PAGE_SIZE);
	vm_page_flag_clear(m, PG_ZERO);
	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
	vm_page_flag_set(m, PG_REFERENCED);
    }
    vm_map_unlock(&kernel_map);
    vm_map_entry_release(count);
    rel_mplock();
    return((void *)addr);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    get_mplock();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    rel_mplock();
}