/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING		32

#define	ZENTRY_FREE		0x12342378

long zone_burst = 128;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *item;
	long n;

	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
	zpcpu = &z->zpcpu[gd->gd_cpuid];
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (zpcpu->zfreecnt > 0) {
		crit_enter_gd(gd);
		if (zpcpu->zfreecnt > 0) {
			item = zpcpu->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			zpcpu->zitems = ((void **) item)[0];
			--zpcpu->zfreecnt;
			++zpcpu->znalloc;
			crit_exit_gd(gd);

			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zspin);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			--z->zfreecnt;
			((void **)item)[0] = zpcpu->zitems;
			zpcpu->zitems = item;
			++zpcpu->zfreecnt;
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zspin);
		goto retry;
	} else {
		spin_unlock(&z->zspin);
		item = zget(z);

		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *tail_item;
	long count;
	long zmax;

	zpcpu = &z->zpcpu[gd->gd_cpuid];

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	zmax = z->zmax_pcpu;
	if (zmax < 1024)
		zmax = 1024;

	/*
	 * Add to pcpu cache
	 */
	crit_enter_gd(gd);
	((void **)item)[0] = zpcpu->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	zpcpu->zitems = item;
	++zpcpu->zfreecnt;

	if (zpcpu->zfreecnt < zmax) {
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Hysteresis, move (zmax) (calculated below) items to the pool.
	 */
	zmax = zmax / 2;
	if (zmax > zone_burst)
		zmax = zone_burst;
	tail_item = zpcpu->zitems;
	count = 1;

	while (count < zmax) {
		tail_item = ((void **)tail_item)[0];
		++count;
	}
	zpcpu->zitems = ((void **)tail_item)[0];
	zpcpu->zfreecnt -= count;

	/*
	 * Per-zone spinlock for the remainder.
	 *
	 * Also implement hysteresis by freeing a number of pcpu
	 * cache entries at the same time.
	 */
	spin_lock(&z->zspin);
	((void **)tail_item)[0] = z->zitems;
	z->zitems = item;
	z->zfreecnt += count;
	spin_unlock(&z->zspin);

	crit_exit_gd(gd);
}

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
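
/*
 * Illustrative usage sketch (the structure, zone pointer, and function
 * names below are hypothetical, not part of the kernel).  It shows the
 * normal zinit()/zalloc()/zfree() lifecycle, and why the first two
 * pointer-sized fields of a zone item must be treated as scratch: while
 * an item sits on a free list they hold the free-list link and (under
 * INVARIANTS) the ZENTRY_FREE magic.
 */
#if 0
struct myobj {
	void	*mo_scratch1;	/* clobbered while free (next link) */
	void	*mo_scratch2;	/* clobbered while free (magic) */
	int	mo_stable;	/* stable across allocations */
};

static vm_zone_t myobj_zone;

static void
myobj_zone_init(void)
{
	/* unlimited zone backed by kernel_map allocations */
	myobj_zone = zinit("MYOBJ", sizeof(struct myobj), 0, 0);
}

static struct myobj *
myobj_alloc(void)
{
	return zalloc(myobj_zone);
}

static void
myobj_free(struct myobj *obj)
{
	zfree(myobj_zone, obj);
}
#endif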

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static vm_pindex_t zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * name		name of the zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, char *name, size_t size, long nentries, uint32_t flags)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zspin, "zinitna");
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zpcpu, sizeof(z->zpcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
					      VM_SUBSYS_ZALLOC);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax = nentries;
		z->zmax_pcpu = z->zmax / ncpus / 16;

		/*
		 * Set reasonable pcpu cache bounds.  Low-memory systems
		 * might try to cache too little, large-memory systems
		 * might try to cache more than necessary.
		 *
		 * In particular, pvzone can wind up being excessive and
		 * waste memory unnecessarily.
		 */
		if (z->zmax_pcpu < 1024)
			z->zmax_pcpu = 1024;
		if (z->zmax_pcpu * z->zsize > 16*1024*1024)
			z->zmax_pcpu = 16*1024*1024 / z->zsize;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
		z->zmax_pcpu = 8192;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;

	/*
	 * Reduce kernel_map spam by allocating in chunks.
	 */
	z->zalloc = ZONE_MAXPGLOAD;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		if (buf)
			zfree(z, buf);
	}

	return 1;
}

/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, size_t size, long nentries, uint32_t flags)
{
	vm_zone_t z;

	z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, name, size, nentries, flags & ~ZONE_DESTROYABLE) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}
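
/*
 * Illustrative sketch of a destroyable zone (the names tmpnode and
 * tmpnode_zone are hypothetical).  Only zones created by zinit() with
 * ZONE_DESTROYABLE may later be passed to zdestroy().
 */
#if 0
struct tmpnode {
	void	*tn_link1;	/* clobbered while on the free list */
	void	*tn_link2;	/* clobbered while on the free list */
	long	tn_payload;
};

static vm_zone_t tmpnode_zone;

static void
tmpnode_subsys_init(void)
{
	tmpnode_zone = zinit("TMPNODE", sizeof(struct tmpnode), 0,
			     ZONE_DESTROYABLE);
}

static void
tmpnode_subsys_uninit(void)
{
	zdestroy(tmpnode_zone);
	tmpnode_zone = NULL;
}
#endif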

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, size_t size, void *item, long nitems)
{
	long i;

	spin_init(&z->zspin, "zbootinit");
	bzero(z->zpcpu, sizeof(z->zpcpu));
	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zitems = NULL;

	bzero(item, (size_t)nitems * z->zsize);

	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	KKASSERT((z->zflags & ZONE_INTERRUPT) == 0);
	for (i = 0; i < z->zkmcur; i++) {
		kmem_free(&kernel_map, z->zkmvec[i],
			  (size_t)z->zalloc * PAGE_SIZE);
		atomic_subtract_long(&zone_kern_pages, z->zalloc);
	}
	if (z->zkmvec != NULL)
		kfree(z->zkmvec, M_ZONE);

	spin_uninit(&z->zspin);
	kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
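
/*
 * Illustrative sketch (hypothetical helper name): unless a zone is created
 * with ZONE_PANICFAIL, zalloc() can return NULL and the caller must check
 * for it.
 */
#if 0
static void *
foo_alloc(vm_zone_t foo_zone)
{
	void *obj;

	obj = zalloc(foo_zone);
	if (obj == NULL) {
		/*
		 * Allocation failed.  A zone created with ZONE_PANICFAIL
		 * would have panicked inside zalloc() instead, so callers
		 * of such zones may omit this check.
		 */
		return NULL;
	}
	return obj;
}
#endif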

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * This function may return NULL.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	vm_page_t pgs[ZONE_MAXPGLOAD];
	vm_page_t m;
	long nitems;
	long savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;
	vm_pindex_t npages;
	vm_pindex_t nalloc;
	long i;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First allocate as many pages as we can, stopping at
		 * our limit or if the page allocation fails.  Try to
		 * avoid exhausting the interrupt free minimum by backing
		 * off to normal page allocations after a certain point.
		 */
		for (i = 0; i < ZONE_MAXPGLOAD && i < z->zalloc; ++i) {
			if (i < 4) {
				m = vm_page_alloc(NULL,
						  mycpu->gd_rand_incr++,
						  z->zallocflag);
			} else {
				m = vm_page_alloc(NULL,
						  mycpu->gd_rand_incr++,
						  VM_ALLOC_NORMAL |
						  VM_ALLOC_SYSTEM);
			}
			if (m == NULL)
				break;
			pgs[i] = m;
		}
		nalloc = i;

		/*
		 * Account for the pages.
		 *
		 * NOTE! Do not allow overlap with a prior page as it
		 *	 may still be undergoing allocation on another
		 *	 cpu.
		 */
		spin_lock(&z->zspin);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		/* noffset -= noffset % z->zsize; */
		savezpc = z->zpagecount;
		if (z->zpagecount + nalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += nalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_long(&zone_kmem_pages, npages);
		spin_unlock(&z->zspin);

		/*
		 * Enter the pages into the reserved KVA space.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = pgs[i];
			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		for (i = npages; i < nalloc; ++i) {
			m = pgs[i];
			vm_page_free(m);
		}
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax==0 ? 1 : z->zkmmax*2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	/*
	 * Enter any new pages into the pool, reserving one, or get the
	 * item from the existing pool.
	 */
	spin_lock(&z->zspin);
	z->ztotal += nitems;

	/*
	 * The zone code may need to allocate kernel memory, which can
	 * recurse zget() infinitely if we do not handle it properly.
	 * We deal with this by directly repopulating the pcpu vm_map_entry
	 * cache.
	 */
	if (nitems > 1 && (z->zflags & ZONE_SPECIAL)) {
		struct globaldata *gd = mycpu;
		vm_map_entry_t entry;

		/*
		 * Make sure we have enough structures in gd_vme_base to handle
		 * the reservation request.
		 *
		 * The critical section protects access to the per-cpu gd.
		 */
		crit_enter();
		while (gd->gd_vme_avail < 2 && nitems > 1) {
			entry = item;
			entry->next = gd->gd_vme_base;
			gd->gd_vme_base = entry;
			atomic_add_int(&gd->gd_vme_avail, 1);
			item = (uint8_t *)item + z->zsize;
			--nitems;
		}
		crit_exit();
	}

	if (nitems != 0) {
		/*
		 * Enter pages into the pool saving one for immediate
		 * allocation.
		 */
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		++z->znalloc;
	} else if (z->zfreecnt > 0) {
		/*
		 * Get an item from the existing pool.
		 */
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = NULL;
#endif
		--z->zfreecnt;
		++z->znalloc;
	} else {
		/*
		 * No items available.
		 */
		item = NULL;
	}
	spin_unlock(&z->zspin);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];
	int error = 0;

	ksnprintf(tmpbuf, sizeof(tmpbuf),
		  "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		size_t i;
		size_t len;
		int offset;
		long freecnt;
		long znalloc;
		int n;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}
		freecnt = curzone->zfreecnt;
		znalloc = curzone->znalloc;
		for (n = 0; n < ncpus; ++n) {
			freecnt += curzone->zpcpu[n].zfreecnt;
			znalloc += curzone->zpcpu[n].znalloc;
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			  "%s %6.6lu, %8.8lu, %6.6lu, %6.6lu, %8.8lu\n",
			  tmpname, curzone->zsize, curzone->zmax,
			  (curzone->ztotal - freecnt),
			  freecnt, znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);

	return (error);
}

#if defined(INVARIANTS)

void
zerror(int error)
{
	const char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}

#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_burst,
	CTLFLAG_RW, &zone_burst, 0,
	"Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt safe pages allocated by zone");