sys/vm/vm_zone.c (dragonfly.git)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING		32

#define ZENTRY_FREE		0x12342378

long zone_burst = 128;

static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
        globaldata_t gd = mycpu;
        vm_zpcpu_t *zpcpu;
        void *item;
        long n;

#ifdef INVARIANTS
        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);
#endif
        zpcpu = &z->zpcpu[gd->gd_cpuid];
retry:
        /*
         * Avoid spinlock contention by allocating from a per-cpu queue
         */
        if (zpcpu->zfreecnt > 0) {
                crit_enter_gd(gd);
                if (zpcpu->zfreecnt > 0) {
                        item = zpcpu->zitems;
#ifdef INVARIANTS
                        KASSERT(item != NULL,
                                ("zitems_pcpu unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
                        ((void **)item)[1] = NULL;
#endif
                        zpcpu->zitems = ((void **) item)[0];
                        --zpcpu->zfreecnt;
                        ++zpcpu->znalloc;
                        crit_exit_gd(gd);

                        return item;
                }
                crit_exit_gd(gd);
        }

        /*
         * Per-zone spinlock for the remainder.  Always load at least one
         * item.
         */
        spin_lock(&z->zspin);
        if (z->zfreecnt > z->zfreemin) {
                n = zone_burst;
                do {
                        item = z->zitems;
#ifdef INVARIANTS
                        KASSERT(item != NULL, ("zitems unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
#endif
                        z->zitems = ((void **)item)[0];
                        --z->zfreecnt;
                        ((void **)item)[0] = zpcpu->zitems;
                        zpcpu->zitems = item;
                        ++zpcpu->zfreecnt;
                } while (--n > 0 && z->zfreecnt > z->zfreemin);
                spin_unlock(&z->zspin);
                goto retry;
        } else {
                spin_unlock(&z->zspin);
                item = zget(z);

                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
                 * will always succeed.  If it doesn't, we panic here.
                 */
                if (item == NULL && (z->zflags & ZONE_PANICFAIL))
                        panic("zalloc(%s) failed", z->zname);
        }
        return item;
}
/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
        globaldata_t gd = mycpu;
        vm_zpcpu_t *zpcpu;
        void *tail_item;
        long count;
        long zmax;

        zpcpu = &z->zpcpu[gd->gd_cpuid];

        /*
         * Avoid spinlock contention by freeing into a per-cpu queue
         */
        zmax = z->zmax_pcpu;
        if (zmax < 1024)
                zmax = 1024;

        /*
         * Add to pcpu cache
         */
        crit_enter_gd(gd);
        ((void **)item)[0] = zpcpu->zitems;
#ifdef INVARIANTS
        if (((void **)item)[1] == (void *)ZENTRY_FREE)
                zerror(ZONE_ERROR_ALREADYFREE);
        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
        zpcpu->zitems = item;
        ++zpcpu->zfreecnt;

        if (zpcpu->zfreecnt < zmax) {
                crit_exit_gd(gd);
                return;
        }

        /*
         * Hysteresis, move (zmax) (calculated below) items to the pool.
         */
        zmax = zmax / 2;
        if (zmax > zone_burst)
                zmax = zone_burst;
        tail_item = item;
        count = 1;

        while (count < zmax) {
                tail_item = ((void **)tail_item)[0];
                ++count;
        }
        zpcpu->zitems = ((void **)tail_item)[0];
        zpcpu->zfreecnt -= count;

        /*
         * Per-zone spinlock for the remainder.
         *
         * Also implement hysteresis by freeing a number of pcpu
         * entries.
         */
        spin_lock(&z->zspin);
        ((void **)tail_item)[0] = z->zitems;
        z->zitems = item;
        z->zfreecnt += count;
        spin_unlock(&z->zspin);

        crit_exit_gd(gd);
}
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually a performance degradation) was
 * observed.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
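/*
 * Illustrative usage sketch only (not compiled; all names below are
 * hypothetical).  A typical consumer declares a zone pointer, creates the
 * zone once with zinit(), and then allocates and frees fixed-size items
 * with zalloc() and zfree().  The vm_map_entry and pv_entry zones referenced
 * elsewhere in this file follow the same pattern.
 *
 *	struct myobj {
 *		void	*scratch[2];	(first two longwords: may be
 *					 overwritten while the item is free)
 *		int	stable_field;	(stable across allocations)
 *	};
 *
 *	static vm_zone_t myobj_zone;
 *
 *	myobj_zone = zinit("MYOBJ", sizeof(struct myobj), 0, 0);
 *
 *	struct myobj *obj = zalloc(myobj_zone);
 *	...
 *	zfree(myobj_zone, obj);
 *
 * Because zones are type-stable, only data located after the first two
 * longwords may be relied upon between a zfree() and a later zalloc() of
 * the same item.
 */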
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);

static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static vm_pindex_t zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, char *name, size_t size, long nentries, uint32_t flags)
{
        size_t totsize;

        /*
         * Only zones created with zinit() are destroyable.
         */
        if (z->zflags & ZONE_DESTROYABLE)
                panic("zinitna: can't create destroyable zone");

        /*
         * NOTE: We can only adjust zsize if we previously did not
         *	 use zbootinit().
         */
        if ((z->zflags & ZONE_BOOT) == 0) {
                z->zsize = roundup2(size, ZONE_ROUNDING);
                spin_init(&z->zspin, "zinitna");
                z->zfreecnt = 0;
                z->ztotal = 0;
                z->zmax = 0;
                z->zname = name;
                z->zitems = NULL;

                lwkt_gettoken(&vm_token);
                LIST_INSERT_HEAD(&zlist, z, zlink);
                lwkt_reltoken(&vm_token);

                bzero(z->zpcpu, sizeof(z->zpcpu));
        }

        z->zkmvec = NULL;
        z->zkmcur = z->zkmmax = 0;
        z->zflags |= flags;

        /*
         * If we cannot wait, allocate KVA space up front, and we will fill
         * in pages as needed.  This is particularly required when creating
         * an allocation space for map entries in kernel_map, because we
         * do not want to go into a recursion deadlock with
         * vm_map_entry_reserve().
         */
        if (z->zflags & ZONE_INTERRUPT) {
                totsize = round_page((size_t)z->zsize * nentries);
                atomic_add_long(&zone_kmem_kvaspace, totsize);

                z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
                                              VM_SUBSYS_ZALLOC);
                if (z->zkva == 0) {
                        LIST_REMOVE(z, zlink);
                        return 0;
                }

                z->zpagemax = totsize / PAGE_SIZE;
                z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
                                VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
                z->zmax += nentries;
                z->zmax_pcpu = z->zmax / ncpus / 16;

                /*
                 * Set reasonable pcpu cache bounds.  Low-memory systems
                 * might try to cache too little, large-memory systems
                 * might try to cache more than necessary.
                 *
                 * In particular, pvzone can wind up being excessive and
                 * waste memory unnecessarily.
                 */
                if (z->zmax_pcpu < 1024)
                        z->zmax_pcpu = 1024;
                if (z->zmax_pcpu * z->zsize > 16*1024*1024)
                        z->zmax_pcpu = 16*1024*1024 / z->zsize;
        } else {
                z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
                z->zmax = 0;
                z->zmax_pcpu = 8192;
        }

        if (z->zsize > PAGE_SIZE)
                z->zfreemin = 1;
        else
                z->zfreemin = PAGE_SIZE / z->zsize;

        z->zpagecount = 0;

        /*
         * Reduce kernel_map spam by allocating in chunks.
         */
        z->zalloc = ZONE_MAXPGLOAD;

        /*
         * Populate the interrupt zone at creation time rather than
         * on first allocation, as this is a potentially long operation.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                void *buf;

                buf = zget(z);
                if (buf)
                        zfree(z, buf);
        }

        return 1;
}
/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically with kmalloc().  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit() is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, size_t size, long nentries, uint32_t flags)
{
        vm_zone_t z;

        z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
        if (z == NULL)
                return NULL;

        z->zflags = 0;
        if (zinitna(z, name, size, nentries, flags & ~ZONE_DESTROYABLE) == 0) {
                kfree(z, M_ZONE);
                return NULL;
        }

        if (flags & ZONE_DESTROYABLE)
                z->zflags |= ZONE_DESTROYABLE;

        return z;
}
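/*
 * Illustrative sketch only: creating an interrupt-safe zone with zinit().
 * The names "FOOBUF", "struct foobuf" and the entry count are hypothetical.
 * With ZONE_INTERRUPT the KVA for nentries items is reserved up front by
 * zinitna(), pages are filled in on demand, the zone is capped at nentries
 * items, and zalloc() on such a zone does not block.
 *
 *	static vm_zone_t foobuf_zone;
 *
 *	foobuf_zone = zinit("FOOBUF", sizeof(struct foobuf),
 *			    1024, ZONE_INTERRUPT);
 *	if (foobuf_zone == NULL)
 *		panic("could not create foobuf zone");
 *
 * A zone created with ZONE_DESTROYABLE (and without ZONE_INTERRUPT) may
 * later be torn down again with zdestroy().
 */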
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, size_t size, void *item, long nitems)
{
        long i;

        spin_init(&z->zspin, "zbootinit");
        bzero(z->zpcpu, sizeof(z->zpcpu));
        z->zname = name;
        z->zsize = size;
        z->zpagemax = 0;
        z->zflags = ZONE_BOOT;
        z->zfreemin = 0;
        z->zallocflag = 0;
        z->zpagecount = 0;
        z->zalloc = 0;

        bzero(item, (size_t)nitems * z->zsize);
        z->zitems = NULL;
        for (i = 0; i < nitems; i++) {
                ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                z->zitems = item;
                item = (uint8_t *)item + z->zsize;
        }
        z->zfreecnt = nitems;
        z->zmax = nitems;
        z->ztotal = nitems;

        lwkt_gettoken(&vm_token);
        LIST_INSERT_HEAD(&zlist, z, zlink);
        lwkt_reltoken(&vm_token);
}
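/*
 * Illustrative sketch only: the early-boot pattern zbootinit() and zinitna()
 * are designed for.  The "struct bootobj" name and array size are
 * hypothetical; in-tree zones such as the vm_map_entry zone hand a static
 * bootstrap array to zbootinit() before the VM system is up and later
 * upgrade the same zone with zinitna().
 *
 *	static struct vm_zone bootobj_zone_store;
 *	static struct bootobj bootobj_bootstrap[64];
 *	static vm_zone_t bootobj_zone = &bootobj_zone_store;
 *
 *	(very early boot, before kernel_map/kmalloc are usable)
 *	zbootinit(bootobj_zone, "BOOTOBJ", sizeof(struct bootobj),
 *		  bootobj_bootstrap, 64);
 *
 *	(later, once the VM system is running)
 *	zinitna(bootobj_zone, "BOOTOBJ", sizeof(struct bootobj), 0, 0);
 */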
/*
 * Release all resources owned by a zone created with zinit().  The zone
 * must have been created with the ZONE_DESTROYABLE flag.
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
        vm_pindex_t i;

        if (z == NULL)
                panic("zdestroy: null zone");
        if ((z->zflags & ZONE_DESTROYABLE) == 0)
                panic("zdestroy: undestroyable zone");

        lwkt_gettoken(&vm_token);
        LIST_REMOVE(z, zlink);
        lwkt_reltoken(&vm_token);

        /*
         * Release virtual mappings, physical memory and update sysctl stats.
         */
        KKASSERT((z->zflags & ZONE_INTERRUPT) == 0);
        for (i = 0; i < z->zkmcur; i++) {
                kmem_free(&kernel_map, z->zkmvec[i],
                          (size_t)z->zalloc * PAGE_SIZE);
                atomic_subtract_long(&zone_kern_pages, z->zalloc);
        }
        if (z->zkmvec != NULL)
                kfree(z->zkmvec, M_ZONE);

        spin_uninit(&z->zspin);
        kfree(z, M_ZONE);
}
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * This function may return NULL.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
        vm_page_t pgs[ZONE_MAXPGLOAD];
        vm_page_t m;
        long nitems;
        long savezpc;
        size_t nbytes;
        size_t noffset;
        void *item;
        vm_pindex_t npages;
        vm_pindex_t nalloc;
        vm_pindex_t i;

        if (z == NULL)
                panic("zget: null zone");

        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Interrupt zones do not mess with the kernel_map, they
                 * simply populate an existing mapping.
                 *
                 * First allocate as many pages as we can, stopping at
                 * our limit or if the page allocation fails.  Try to
                 * avoid exhausting the interrupt free minimum by backing
                 * off to normal page allocations after a certain point.
                 */
                for (i = 0; i < ZONE_MAXPGLOAD && i < z->zalloc; ++i) {
                        if (i < 4) {
                                m = vm_page_alloc(NULL,
                                                  mycpu->gd_rand_incr++,
                                                  z->zallocflag);
                        } else {
                                m = vm_page_alloc(NULL,
                                                  mycpu->gd_rand_incr++,
                                                  VM_ALLOC_NORMAL |
                                                  VM_ALLOC_SYSTEM);
                        }
                        if (m == NULL)
                                break;
                        pgs[i] = m;
                }
                nalloc = i;

                /*
                 * Account for the pages.
                 *
                 * NOTE! Do not allow overlap with a prior page as it
                 *	 may still be undergoing allocation on another
                 *	 cpu.
                 */
                spin_lock(&z->zspin);
                noffset = (size_t)z->zpagecount * PAGE_SIZE;
                /* noffset -= noffset % z->zsize; */
                savezpc = z->zpagecount;
                if (z->zpagecount + nalloc > z->zpagemax)
                        z->zpagecount = z->zpagemax;
                else
                        z->zpagecount += nalloc;
                item = (char *)z->zkva + noffset;
                npages = z->zpagecount - savezpc;
                nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
                         z->zsize;
                atomic_add_long(&zone_kmem_pages, npages);
                spin_unlock(&z->zspin);

                /*
                 * Enter the pages into the reserved KVA space.
                 */
                for (i = 0; i < npages; ++i) {
                        vm_offset_t zkva;

                        m = pgs[i];
                        KKASSERT(m->queue == PQ_NONE);
                        m->valid = VM_PAGE_BITS_ALL;
                        vm_page_wire(m);
                        vm_page_wakeup(m);

                        zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
                        pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
                        bzero((void *)zkva, PAGE_SIZE);
                }
                for (i = npages; i < nalloc; ++i) {
                        m = pgs[i];
                        vm_page_free(m);
                }
        } else if (z->zflags & ZONE_SPECIAL) {
                /*
                 * The special zone is the one used for vm_map_entry_t's.
                 * We have to avoid an infinite recursion in
                 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
                 * instead.  The map entries are pre-reserved by the kernel
                 * by vm_map_entry_reserve_cpu_init().
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes,
                                           VM_SUBSYS_ZALLOC, KM_KRESERVE);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        atomic_add_long(&zone_kern_pages, z->zalloc);
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        } else {
                /*
                 * Otherwise allocate KVA from the kernel_map.
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes,
                                           VM_SUBSYS_ZALLOC, 0);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        atomic_add_long(&zone_kern_pages, z->zalloc);
                        bzero(item, nbytes);

                        if (z->zflags & ZONE_DESTROYABLE) {
                                if (z->zkmcur == z->zkmmax) {
                                        z->zkmmax =
                                            z->zkmmax==0 ? 1 : z->zkmmax*2;
                                        z->zkmvec = krealloc(z->zkmvec,
                                            z->zkmmax * sizeof(z->zkmvec[0]),
                                            M_ZONE, M_WAITOK);
                                }
                                z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
                        }
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        }

        /*
         * Enter any new pages into the pool, reserving one, or get the
         * item from the existing pool.
         */
        spin_lock(&z->zspin);
        z->ztotal += nitems;

        /*
         * The zone code may need to allocate kernel memory, which can
         * recurse zget() infinitely if we do not handle it properly.
         * We deal with this by directly repopulating the pcpu vm_map_entry
         * cache.
         */
        if (nitems > 1 && (z->zflags & ZONE_SPECIAL)) {
                struct globaldata *gd = mycpu;
                vm_map_entry_t entry;

                /*
                 * Make sure we have enough structures in gd_vme_base to handle
                 * the reservation request.
                 *
                 * The critical section protects access to the per-cpu gd.
                 */
                crit_enter();
                while (gd->gd_vme_avail < 2 && nitems > 1) {
                        entry = item;
                        entry->next = gd->gd_vme_base;
                        gd->gd_vme_base = entry;
                        atomic_add_int(&gd->gd_vme_avail, 1);
                        item = (uint8_t *)item + z->zsize;
                        --nitems;
                }
                crit_exit();
        }

        if (nitems != 0) {
                /*
                 * Enter pages into the pool saving one for immediate
                 * allocation.
                 */
                nitems -= 1;
                for (i = 0; i < nitems; i++) {
                        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                        z->zitems = item;
                        item = (uint8_t *)item + z->zsize;
                }
                z->zfreecnt += nitems;
                ++z->znalloc;
        } else if (z->zfreecnt > 0) {
                /*
                 * Get an item from the existing pool.
                 */
                item = z->zitems;
                z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
                if (((void **)item)[1] != (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **) item)[1] = NULL;
#endif
                --z->zfreecnt;
                ++z->znalloc;
        } else {
                /*
                 * No items available.
                 */
                item = NULL;
        }
        spin_unlock(&z->zspin);

        return item;
}
/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
        vm_zone_t curzone;
        char tmpbuf[128];
        char tmpname[14];
        int error = 0;

        ksnprintf(tmpbuf, sizeof(tmpbuf),
            "\nITEM SIZE LIMIT USED FREE REQUESTS\n");
        error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
        if (error)
                return (error);

        lwkt_gettoken(&vm_token);
        LIST_FOREACH(curzone, &zlist, zlink) {
                size_t i;
                size_t len;
                int offset;
                long freecnt;
                long znalloc;
                int n;

                len = strlen(curzone->zname);
                if (len >= (sizeof(tmpname) - 1))
                        len = (sizeof(tmpname) - 1);
                for(i = 0; i < sizeof(tmpname) - 1; i++)
                        tmpname[i] = ' ';
                tmpname[i] = 0;
                memcpy(tmpname, curzone->zname, len);
                tmpname[len] = ':';
                offset = 0;
                if (curzone == LIST_FIRST(&zlist)) {
                        offset = 1;
                        tmpbuf[0] = '\n';
                }
                freecnt = curzone->zfreecnt;
                znalloc = curzone->znalloc;
                for (n = 0; n < ncpus; ++n) {
                        freecnt += curzone->zpcpu[n].zfreecnt;
                        znalloc += curzone->zpcpu[n].znalloc;
                }

                ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
                        "%s %6.6lu, %8.8lu, %6.6lu, %6.6lu, %8.8lu\n",
                        tmpname, curzone->zsize, curzone->zmax,
                        (curzone->ztotal - freecnt),
                        freecnt, znalloc);

                len = strlen((char *)tmpbuf);
                if (LIST_NEXT(curzone, zlink) == NULL)
                        tmpbuf[len - 1] = 0;

                error = SYSCTL_OUT(req, tmpbuf, len);

                if (error)
                        break;
        }
        lwkt_reltoken(&vm_token);
        return (error);
}
#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
        char *msg;

        switch (error) {
        case ZONE_ERROR_INVALID:
                msg = "zone: invalid zone";
                break;
        case ZONE_ERROR_NOTFREE:
                msg = "zone: entry not free";
                break;
        case ZONE_ERROR_ALREADYFREE:
                msg = "zone: freeing free entry";
                break;
        default:
                msg = "zone: invalid error";
                break;
        }
        panic("%s", msg);
}

#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
           NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_pages,
            CTLFLAG_RD, &zone_kmem_pages, 0,
            "Number of interrupt safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_burst,
            CTLFLAG_RW, &zone_burst, 0,
            "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
            CTLFLAG_RD, &zone_kmem_kvaspace, 0,
            "KVA space allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kern_pages,
            CTLFLAG_RD, &zone_kern_pages, 0,
            "Number of non-interrupt safe pages allocated by zone");