/*
 * SLABALLOC.C	- Userland SLAB memory allocator
 *
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libcaps/slaballoc.c,v 1.4 2004/07/04 22:44:26 eirikn Exp $
 *
 * This module implements a thread-safe slab allocator for userland.
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * To mitigate this we attempt to select a reasonable zone size based on
 * available system memory, e.g. 32K instead of 128K.  Also, since the
 * slab allocator is operating out of virtual memory in userland the actual
 * physical memory use is not as bad as it might otherwise be.
 *
 * The upside is that overhead is bounded... waste goes down as use goes up.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 * Allocations >= ZoneLimit go directly to kmem.
 *
 *	API REQUIREMENTS AND SIDE EFFECTS
 *
 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 * have remained compatible with the following API requirements:
 *
 *	+ small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *	+ all power-of-2 sized allocations are power-of-2 aligned (twe)
 *	+ malloc(0) is allowed and returns non-NULL (ahc driver)
 *	+ ability to allocate arbitrarily large chunks of memory
 */
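
/*
 * Illustrative usage sketch (not compiled).  It exercises the compatibility
 * points listed above.  The M_TEMP bucket and the M_WAITOK flag are assumed
 * to be provided by <sys/malloc.h> in this build.
 */
#if 0
static void
slab_usage_example(void)
{
    void *p = slab_malloc(256, M_TEMP, M_WAITOK);   /* power-of-2 aligned */
    void *q = slab_malloc(0, M_TEMP, M_WAITOK);     /* non-NULL token */

    KKASSERT(((uintptr_t)p & 255) == 0);
    q = slab_realloc(q, 64, M_TEMP, M_WAITOK);      /* realloc of a 0-byte alloc */
    slab_free(p, M_TEMP);
    slab_free(q, M_TEMP);
}
#endif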

#include <sys/types.h>
#include <sys/stdint.h>
#include <sys/malloc.h>
#include <sys/mman.h>		/* mmap, munmap */
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include "globaldata.h"
#include <sys/sysctl.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
#define slab_min(a,b)	(((a)<(b)) ? (a) : (b))

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZonePageLimit;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static int32_t weirdary[16];

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
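
/*
 * Example (illustrative, assuming 4K pages): PAGE_MASK is 0xfff, so
 * IN_SAME_PAGE_MASK keeps every address bit except bits 3-11.  Two
 * pointers compare equal under it only when they lie in the same page
 * and share the same low three bits, which is how the free-list sanity
 * check in slab_malloc() screens out garbage c_Next values.
 */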

#define SLOVERSZ_HSIZE		8192
#define SLOVERSZ_HMASK		(SLOVERSZ_HSIZE - 1)

#define SLOVERSZ_HASH(ptr)	((((uintptr_t)ptr >> PAGE_SHIFT) ^ \
				 ((uintptr_t)ptr >> (PAGE_SHIFT * 2))) & \
				 SLOVERSZ_HMASK)

SLOversized *SLOvHash[SLOVERSZ_HSIZE];

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */
MALLOC_DEFINE(M_OVERSIZED, "overszinfo", "Oversized Info Blocks");

/*
 * Locate the hash chain link pointer for an oversized allocation.  Returns
 * a pointer to the link referencing the matching SLOversized record, or
 * NULL if the pointer is not an oversized allocation.
 */
static __inline SLOversized **
get_oversized(void *ptr)
{
    SLOversized **slovpp;
    SLOversized *slov;

    for (slovpp = &SLOvHash[SLOVERSZ_HASH(ptr)];
	 (slov = *slovpp) != NULL;
	 slovpp = &slov->ov_Next
    ) {
	if (slov->ov_Ptr == ptr)
	    return(slovpp);
    }
    return(NULL);
}
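
/*
 * Illustrative sketch (not compiled): callers use the link pointer returned
 * by get_oversized() to unlink a record, and push new records onto the head
 * of a hash chain.  Variable names are arbitrary; the ov_* fields are the
 * SLOversized fields used throughout this file.
 *
 *	unlink (as done in slab_free()):
 *		if ((slovpp = get_oversized(ptr)) != NULL) {
 *			slov = *slovpp;
 *			*slovpp = slov->ov_Next;
 *		}
 *
 *	insert (as done in slab_malloc() for oversized chunks):
 *		slovpp = &SLOvHash[SLOVERSZ_HASH(chunk)];
 *		slov->ov_Next = *slovpp;
 *		*slovpp = slov;
 */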

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
    int pagecnt_size = sizeof(pagecnt);

    error = sysctlbyname("vm.stats.vm.v_page_count",
			 &pagecnt, &pagecnt_size, NULL, 0);
    if (error == 0) {
	limsize = pagecnt * (vm_poff_t)PAGE_SIZE;
	usesize = (int)(limsize / 1024);	/* convert to KB */

	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
	while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	    ZoneSize <<= 1;
    } else {
	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    }

    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageLimit = PAGE_SIZE * 4;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    for (i = 0; i < arysize(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;
    slab_malloc_init(M_OVERSIZED);

/*
 * Initialize a malloc type tracking structure.
 */
void
slab_malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    /*
     * Skip if already initialized
     */
    if (type->ks_limit != 0)
	return;

    type->ks_magic = M_MAGIC;
    limsize = (vm_poff_t)-1;	/* unlimited */
    type->ks_limit = limsize / 10;
    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
slab_malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
    long ttl;
    int i;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	printf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
	    ttl, type->ks_shortdesc, i);
    }

    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */

    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
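
/*
 * Worked examples of the mapping above (illustrative only):
 *
 *	request  23 -> rounded to  24, zone index  24/8  - 1 =  2
 *	request 100 -> rounded to 104, zone index 104/8  - 1 = 12
 *	request 200 -> rounded to 208, zone index 208/16 + 7 = 20
 *	request 600 -> rounded to 640, zone index 640/64 + 23 = 33
 *
 * Every request is rounded up to its zone's chunk size; with the largest
 * bands compiled in the highest index is 32768/2048 + 63 = 79, i.e. the
 * ~80 zones mentioned in the header comment.
 */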

/*
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 * &SlabMisc if you don't care.
 *
 *	M_NOWAIT	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 */
void *
slab_malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    struct globaldata *gd;
    int zi;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    slab_malloc_init(type);
	crit_exit();
    }

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  XXX the original malloc code looped, but this tended to
     * simply deadlock the computer.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;
	if (ttl >= type->ks_limit) {
	    if (flags & (M_NOWAIT|M_NULLOK))
		return(NULL);
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0)
	return(ZERO_LENGTH_PTR);

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_NOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    munmap(z, ZoneSize);
	}
	crit_exit();
    }

    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_NOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    munmap(z, z->z_ChunkSize);
	}
	crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE.
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
	SLOversized **slovpp;
	SLOversized *slov;

	slov = slab_malloc(sizeof(SLOversized), M_OVERSIZED, M_ZERO);
	if (slov == NULL)
	    return(NULL);

	size = round_page(size);
	chunk = mmap(NULL, size, PROT_READ|PROT_WRITE,
		     MAP_ANON|MAP_PRIVATE, -1, 0);
	if (chunk == MAP_FAILED) {
	    slab_free(slov, M_OVERSIZED);
	    return(NULL);
	}
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;

	slov->ov_Ptr = chunk;
	slov->ov_Bytes = size;
	slovpp = &SLOvHash[SLOVERSZ_HASH(chunk)];
	slov->ov_Next = *slovpp;
	*slovpp = slov;
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    if ((z = slgd->ZoneAry[zi]) != NULL) {
	KKASSERT(z->z_NFree > 0);

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (--z->z_NFree == 0) {
	    slgd->ZoneAry[zi] = z->z_Next;
	    z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
		/*
		 * Diagnostic: c_Next is not total garbage.
		 */
		KKASSERT(chunk->c_Next == NULL ||
			 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
		if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
		    panic("chunk %p FFPG %d/%d", chunk,
			  z->z_FirstFreePg, ZonePageCount);
		if (chunk->c_Next &&
		    (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
		    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next,
			  z->z_FirstFreePg, ZonePageCount);
		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
		goto done;
	    }
	    ++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory, so
	 * it must be available in the never-before-used-memory area
	 * governed by UIndex.  The consequences are very serious if our zone
	 * got corrupted so we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    z->z_UIndex = z->z_UIndex + 1;
	else
	    z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zerod pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    if ((z = slgd->FreeZones) != NULL) {
	slgd->FreeZones = z->z_Next;
	--slgd->NFreeZones;
	bzero(z, sizeof(SLZone));
	z->z_Flags |= SLZF_UNOTZEROD;
    } else {
	z = mmap(NULL, ZoneSize, PROT_READ|PROT_WRITE,
		 MAP_ANON|MAP_PRIVATE, -1, 0);
    }

    /*
     * Guarantee power-of-2 alignment for power-of-2-sized chunks.
     * Otherwise just 8-byte align the data.
     */
    if ((size | (size - 1)) + 1 == (size << 1))
	off = (sizeof(SLZone) + size - 1) & ~(size - 1);
    else
	off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
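
    /*
     * Worked example of the power-of-2 test above (illustrative only):
     * size = 64 gives (64 | 63) + 1 == 128 == (64 << 1), so 64 is a power
     * of 2 and off is rounded up to a multiple of 64.  size = 96 gives
     * (96 | 95) + 1 == 128 != 192, so the plain 8-byte alignment is used.
     */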

    z->z_Magic = ZALLOC_SLAB_MAGIC;
    z->z_ZoneIndex = zi;
    z->z_NMax = (ZoneSize - off) / size;
    z->z_NFree = z->z_NMax - 1;
    z->z_BasePtr = (char *)z + off;
    z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
    z->z_ChunkSize = size;
    z->z_FirstFreePg = ZonePageCount;
    z->z_Cpu = gd->gd_cpuid;
    z->z_CpuGd = gd;
    chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
    z->z_Next = slgd->ZoneAry[zi];
    slgd->ZoneAry[zi] = z;
    if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	flags &= ~M_ZERO;	/* already zero'd */
	flags |= M_PASSIVE_ZERO;
    }

    /*
     * Slide the base index for initial allocations out of the next
     * zone we create so we do not over-weight the lower part of the
     * cpu memory caches.
     */
    slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
		      & (ZALLOC_MAX_ZONE_SIZE - 1);

done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;
    if (flags & M_ZERO)
	bzero(chunk, size);
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
	chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    return(chunk);
}

void *
slab_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;
    SLOversized **slovpp;
    SLOversized *slov;

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(slab_malloc(size, type, flags));
    if (size == 0) {
	slab_free(ptr, type);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle oversized allocations.
     */
    if ((slovpp = get_oversized(ptr)) != NULL) {
	slov = *slovpp;
	osize = slov->ov_Bytes;
	if (osize == round_page(size))
	    return(ptr);
	if ((nptr = slab_malloc(size, type, flags)) == NULL)
	    return(NULL);
	bcopy(ptr, nptr, slab_min(size, osize));
	slab_free(ptr, type);
	return(nptr);
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
	return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = slab_malloc(size, type, flags)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, slab_min(size, z->z_ChunkSize));
    slab_free(ptr, type);
    return(nptr);
}

/*
 * slab_free()	(SLAB ALLOCATOR)
 *
 * Free the specified chunk of memory.
 */
static void
slab_free_remote(void *ptr)
{
    slab_free(ptr, *(struct malloc_type **)ptr);
}

void
slab_free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLOversized **slovpp;
    SLOversized *slov;
    struct globaldata *gd;
    int pgno;

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR)
	return;

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to slab_free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    if ((slovpp = get_oversized(ptr)) != NULL) {
	slov = *slovpp;
	*slovpp = slov->ov_Next;

	KKASSERT(sizeof(weirdary) <= slov->ov_Bytes);
	bcopy(weirdary, ptr, sizeof(weirdary));

	/*
	 * note: we always adjust our cpu's slot, not the originating
	 * cpu (kup->ku_cpuid).  The statistics are in aggregate.
	 *
	 * note: XXX we have still inherited the interrupts-can't-block
	 * assumption.  An interrupt thread does not bump
	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	 * primarily until we can fix softupdate's assumptions about free().
	 */
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= slov->ov_Bytes;
	if (mycpu->gd_intr_nesting_level ||
	    (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
	    z = (SLZone *)ptr;
	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
	    z->z_Next = slgd->FreeOvZones;
	    z->z_ChunkSize = slov->ov_Bytes;
	    slgd->FreeOvZones = z;
	} else {
	    munmap(ptr, slov->ov_Bytes);
	}
	slab_free(slov, M_OVERSIZED);
	return;
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
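
    /*
     * Illustrative example of the mask above: with a 64K zone, ZoneMask is
     * 0xffff, so clearing the low 16 bits of any chunk address inside the
     * zone yields the SLZone header at the base of the zone.  This relies
     * on zones being ZoneSize aligned and ZoneSize being a power of 2.
     */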

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  The freeing code does not need the byte count
     * unless DIAGNOSTIC is set.
     */
    if (z->z_CpuGd != gd) {
	*(struct malloc_type **)ptr = type;
#ifdef SMP
	lwkt_send_ipiq(z->z_CpuGd, slab_free_remote, ptr);
#else
	panic("Corrupt SLZone");
#endif
	return;
    }

    if (type->ks_magic != M_MAGIC)
	panic("slab_free: malloc type lacks magic");

    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
	SLChunk *scan;

	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
	    if (scan == chunk)
		panic("Double free at %p", chunk);
	}
    }

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * the z_FirstFreePg index.
     */
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE %p\n", chunk);
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE2");
    if (z->z_FirstFreePg > pgno)
	z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
	SLZone **pz;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
    }
}