/*
 * Copyright (c) 1997, 1998 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 *
 * Copyright (c) 2003-2017,2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID      0
#define ZONE_ERROR_NOTFREE      1
#define ZONE_ERROR_ALREADYFREE  2

#define ZONE_ROUNDING   32

#define ZENTRY_FREE     0x12342378

long zone_burst = 128;

static void *zget(vm_zone_t z, int *tryagainp);
static void zerror(int error);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 */
void *
zalloc(vm_zone_t z)
{
        globaldata_t gd = mycpu;

        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);

        zpcpu = &z->zpcpu[gd->gd_cpuid];
retry:
        /*
         * Avoid spinlock contention by allocating from a per-cpu queue
         */
        if (zpcpu->zfreecnt > 0) {
                crit_enter_gd(gd);
                if (zpcpu->zfreecnt > 0) {
                        item = zpcpu->zitems;
                        KASSERT(item != NULL,
                                ("zitems_pcpu unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
                        ((void **)item)[1] = NULL;
                        zpcpu->zitems = ((void **)item)[0];
                        --zpcpu->zfreecnt;
                        crit_exit_gd(gd);
                        return (item);
                }
                crit_exit_gd(gd);
        }

        /*
         * Per-zone spinlock for the remainder.  Always load at least one
         * item.
         */
        spin_lock(&z->zspin);
        if (z->zfreecnt > z->zfreemin) {
                n = zone_burst;
                do {
                        item = z->zitems;
                        KASSERT(item != NULL, ("zitems unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
                        z->zitems = ((void **)item)[0];
                        --z->zfreecnt;
                        ((void **)item)[0] = zpcpu->zitems;
                        zpcpu->zitems = item;
                        ++zpcpu->zfreecnt;
                } while (--n > 0 && z->zfreecnt > z->zfreemin);
                spin_unlock(&z->zspin);
                goto retry;
        } else {
                spin_unlock(&z->zspin);
                tryagain = 0;
                item = zget(z, &tryagain);
                if (item == NULL && tryagain)
                        goto retry;

                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
                 * will always succeed.  If it doesn't, we panic here.
                 */
                if (item == NULL && (z->zflags & ZONE_PANICFAIL))
                        panic("zalloc(%s) failed", z->zname);
        }
        return (item);
}

/*
 * Free an item to the specified zone.
 */
void
zfree(vm_zone_t z, void *item)
{
        globaldata_t gd = mycpu;

        zpcpu = &z->zpcpu[gd->gd_cpuid];

        /*
         * Avoid spinlock contention by freeing into a per-cpu queue
         */
        zmax = z->zmax_pcpu;
        crit_enter_gd(gd);
        ((void **)item)[0] = zpcpu->zitems;
        if (((void **)item)[1] == (void *)ZENTRY_FREE)
                zerror(ZONE_ERROR_ALREADYFREE);
        ((void **)item)[1] = (void *)ZENTRY_FREE;
        zpcpu->zitems = item;
        ++zpcpu->zfreecnt;

        if (zpcpu->zfreecnt < zmax) {
                crit_exit_gd(gd);
                return;
        }

        /*
         * Hysteresis, move (zmax) (calculated below) items to the pool.
         */
        if (zmax > zone_burst)
                zmax = zone_burst;
        tail_item = zpcpu->zitems;
        count = 1;
        while (count < zmax) {
                tail_item = ((void **)tail_item)[0];
                ++count;
        }
        zpcpu->zitems = ((void **)tail_item)[0];
        zpcpu->zfreecnt -= count;

        /*
         * Per-zone spinlock for the remainder.
         *
         * Also implement hysteresis by freeing a number of pcpu
         * entries at once.
         */
        spin_lock(&z->zspin);
        ((void **)tail_item)[0] = z->zitems;
        z->zitems = item;
        z->zfreecnt += count;
        spin_unlock(&z->zspin);
        crit_exit_gd(gd);
}
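
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * consumer pairs zalloc()/zfree() against a zone created once at init
 * time.  The struct mydata / mydata_zone names below are hypothetical.
 * ZONE_PANICFAIL makes zalloc() panic rather than return NULL.
 *
 *      static vm_zone_t mydata_zone;
 *
 *      void
 *      mydata_init(void)
 *      {
 *              mydata_zone = zinit("mydata", sizeof(struct mydata),
 *                                  0, ZONE_PANICFAIL);
 *      }
 *
 *      struct mydata *
 *      mydata_alloc(void)
 *      {
 *              return (zalloc(mydata_zone));
 *      }
 *
 *      void
 *      mydata_free(struct mydata *p)
 *      {
 *              zfree(mydata_zone, p);
 *      }
 */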

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
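
/*
 * Layout sketch of the type-stability rule above (the field names are
 * hypothetical).  While an item is free, the allocator overwrites its
 * first two longwords: ((void **)item)[0] links the free list and
 * ((void **)item)[1] holds the ZENTRY_FREE magic.  Only data at or
 * beyond the third longword survives a zfree()/zalloc() cycle:
 *
 *      struct mydata {
 *              void    *md_scratch0;   (clobbered while free: list link)
 *              void    *md_scratch1;   (clobbered while free: ZENTRY_FREE)
 *              int     md_stable;      (preserved between allocations)
 *      };
 */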

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);

static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static vm_pindex_t zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 * z            pointer to zone structure.
 * obj          pointer to VM object (opt).
 * name         name of zone.
 * size         size of zone entries.
 * nentries     number of zone entries allocated (only ZONE_INTERRUPT.)
 * flags        ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc       number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 */
int
zinitna(vm_zone_t z, char *name, size_t size, long nentries, uint32_t flags)
{
        size_t totsize;

        /*
         * Only zones created with zinit() are destroyable.
         */
        if (z->zflags & ZONE_DESTROYABLE)
                panic("zinitna: can't create destroyable zone");

        /*
         * NOTE: We can only adjust zsize if we previously did not
         *       use zbootinit().
         */
        if ((z->zflags & ZONE_BOOT) == 0) {
                z->zsize = roundup2(size, ZONE_ROUNDING);
                spin_init(&z->zspin, "zinitna");
                lockinit(&z->zgetlk, "zgetlk", 0, LK_CANRECURSE);
                z->zfreecnt = 0;
                z->zname = name;
                z->zitems = NULL;

                lwkt_gettoken(&vm_token);
                LIST_INSERT_HEAD(&zlist, z, zlink);
                lwkt_reltoken(&vm_token);

                bzero(z->zpcpu, sizeof(z->zpcpu));
        }

        z->zkmcur = z->zkmmax = 0;
        z->zkmvec = NULL;
        z->zflags |= flags;

        /*
         * If we cannot wait, allocate KVA space up front, and we will fill
         * in pages as needed.  This is particularly required when creating
         * an allocation space for map entries in kernel_map, because we
         * do not want to go into a recursion deadlock with
         * vm_map_entry_reserve().
         */
        if (z->zflags & ZONE_INTERRUPT) {
                totsize = round_page((size_t)z->zsize * nentries);
                atomic_add_long(&zone_kmem_kvaspace, totsize);

                z->zkva = kmem_alloc_pageable(kernel_map, totsize,
                                              VM_SUBSYS_ZALLOC);
                if (z->zkva == 0) {
                        LIST_REMOVE(z, zlink);
                        return (0);
                }

                z->zpagemax = totsize / PAGE_SIZE;
                z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
                                VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
                z->zmax = nentries;

                /*
                 * Set reasonable pcpu cache bounds.  Low-memory systems
                 * might try to cache too little, large-memory systems
                 * might try to cache more than necessary.
                 *
                 * In particular, pvzone can wind up being excessive and
                 * waste memory unnecessarily.
                 */
                z->zmax_pcpu = z->zmax / ncpus / 64;
                if (z->zmax_pcpu < 1024)
                        z->zmax_pcpu = 1024;
                if (z->zmax_pcpu * z->zsize > 16*1024*1024)
                        z->zmax_pcpu = 16*1024*1024 / z->zsize;
        } else {
                z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
        }

        if (z->zsize > PAGE_SIZE)
                z->zfreemin = 1;
        else
                z->zfreemin = PAGE_SIZE / z->zsize;

        /*
         * Reduce kernel_map spam by allocating in chunks.
         */
        z->zalloc = ZONE_MAXPGLOAD;

        /*
         * Populate the interrupt zone at creation time rather than
         * on first allocation, as this is a potentially long operation.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                void *buf;
                int tryagain = 0;

                buf = zget(z, &tryagain);
                if (buf)
                        zfree(z, buf);
        }

        return (1);
}

/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, size_t size, long nentries, uint32_t flags)
{
        vm_zone_t z;

        z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
        if (z == NULL)
                return (NULL);

        z->zflags = 0;
        if (zinitna(z, name, size, nentries, flags & ~ZONE_DESTROYABLE) == 0) {
                kfree(z, M_ZONE);
                return (NULL);
        }

        if (flags & ZONE_DESTROYABLE)
                z->zflags |= ZONE_DESTROYABLE;

        return (z);
}
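
/*
 * Example (illustrative; the "foodesc" names are hypothetical): an
 * interrupt-safe zone must give a hard entry limit up front so its KVA
 * can be pre-reserved, while a normal zone passes 0 and grows on demand.
 *
 *      foo_zone = zinit("foodesc", sizeof(struct foodesc),
 *                       NFOODESC, ZONE_INTERRUPT);
 *
 * With ZONE_INTERRUPT the zone can never hold more than nentries items;
 * without it the allocatable memory is unlimited and nentries does not
 * size the zone.
 */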

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, size_t size, void *item, long nitems)
{
        long i;

        spin_init(&z->zspin, "zbootinit");
        lockinit(&z->zgetlk, "zgetlk", 0, LK_CANRECURSE);
        bzero(z->zpcpu, sizeof(z->zpcpu));

        z->zname = name;
        z->zsize = size;
        z->zflags = ZONE_BOOT;
        z->zitems = NULL;

        bzero(item, (size_t)nitems * z->zsize);
        for (i = 0; i < nitems; i++) {
                ((void **)item)[0] = z->zitems;
                ((void **)item)[1] = (void *)ZENTRY_FREE;
                z->zitems = item;
                item = (uint8_t *)item + z->zsize;
        }
        z->zfreecnt = nitems;

        lwkt_gettoken(&vm_token);
        LIST_INSERT_HEAD(&zlist, z, zlink);
        lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by zone created with zinit().
 */
void
zdestroy(vm_zone_t z)
{
        int i;

        if (z == NULL)
                panic("zdestroy: null zone");
        if ((z->zflags & ZONE_DESTROYABLE) == 0)
                panic("zdestroy: undestroyable zone");

        lwkt_gettoken(&vm_token);
        LIST_REMOVE(z, zlink);
        lwkt_reltoken(&vm_token);

        /*
         * Release virtual mappings, physical memory and update sysctl stats.
         */
        KKASSERT((z->zflags & ZONE_INTERRUPT) == 0);
        for (i = 0; i < z->zkmcur; i++) {
                kmem_free(kernel_map, z->zkmvec[i],
                          (size_t)z->zalloc * PAGE_SIZE);
                atomic_subtract_long(&zone_kern_pages, z->zalloc);
        }
        if (z->zkmvec != NULL)
                kfree(z->zkmvec, M_ZONE);

        spin_uninit(&z->zspin);
        kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *      Returns an item from a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *      Frees an item back to a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 */
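
/*
 * Callers of zalloc() on zones created without ZONE_PANICFAIL must be
 * prepared for a NULL return.  Illustrative sketch (bar_zone is a
 * hypothetical zone):
 *
 *      struct bar *bp;
 *
 *      bp = zalloc(bar_zone);
 *      if (bp == NULL)
 *              return (ENOMEM);
 *      ...
 *      zfree(bar_zone, bp);
 */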

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * This function may return NULL.
 */
static void *
zget(vm_zone_t z, int *tryagainp)
{
        vm_page_t pgs[ZONE_MAXPGLOAD];

        if (z == NULL)
                panic("zget: null zone");

        /*
         * We need an encompassing per-zone lock for zget() refills.
         *
         * Without this we wind up locking on the vm_map inside kmem_alloc*()
         * prior to any entries actually being added to the zone, potentially
         * exhausting the per-cpu cache of vm_map_entry's when multiple threads
         * are blocked on the same lock on the same cpu.
         */
        if ((z->zflags & ZONE_INTERRUPT) == 0) {
                if (lockmgr(&z->zgetlk, LK_EXCLUSIVE | LK_SLEEPFAIL)) {
                        *tryagainp = 1;
                        return (NULL);
                }
        }

        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Interrupt zones do not mess with the kernel_map, they
                 * simply populate an existing mapping.
                 *
                 * First allocate as many pages as we can, stopping at
                 * our limit or if the page allocation fails.  Try to
                 * avoid exhausting the interrupt free minimum by backing
                 * off to normal page allocations after a certain point.
                 */
                for (i = 0; i < ZONE_MAXPGLOAD && i < z->zalloc; ++i) {
                        if (i < 4) {
                                m = vm_page_alloc(NULL,
                                                  mycpu->gd_rand_incr++,
                                                  z->zallocflag);
                        } else {
                                m = vm_page_alloc(NULL,
                                                  mycpu->gd_rand_incr++,
                                                  VM_ALLOC_NORMAL |
                                                  VM_ALLOC_SYSTEM);
                        }
                        if (m == NULL)
                                break;
                        pgs[i] = m;
                }
                nalloc = i;

                /*
                 * Account for the pages.
                 *
                 * NOTE! Do not allow overlap with a prior page as it
                 *       may still be undergoing allocation on another
                 *       cpu.
                 */
                spin_lock(&z->zspin);
                noffset = (size_t)z->zpagecount * PAGE_SIZE;
                /* noffset -= noffset % z->zsize; */
                savezpc = z->zpagecount;

                /*
                 * Track total memory use and kmem offset.
                 */
                if (z->zpagecount + nalloc > z->zpagemax)
                        z->zpagecount = z->zpagemax;
                else
                        z->zpagecount += nalloc;

                item = (char *)z->zkva + noffset;
                npages = z->zpagecount - savezpc;
                nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
                         z->zsize;
                atomic_add_long(&zone_kmem_pages, npages);
                spin_unlock(&z->zspin);

                /*
                 * Enter the pages into the reserved KVA space.
                 */
                for (i = 0; i < npages; ++i) {
                        vm_offset_t zkva;

                        m = pgs[i];
                        KKASSERT(m->queue == PQ_NONE);
                        m->valid = VM_PAGE_BITS_ALL;
                        vm_page_wire(m);
                        vm_page_wakeup(m);

                        zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
                        pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
                        bzero((void *)zkva, PAGE_SIZE);
                }
                for (i = npages; i < nalloc; ++i) {
                        m = pgs[i];
                        vm_page_free(m);
                }
        } else if (z->zflags & ZONE_SPECIAL) {
                /*
                 * The special zone is the one used for vm_map_entry_t's.
                 * We have to avoid an infinite recursion in
                 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
                 * instead.  The map entries are pre-reserved by the kernel
                 * by vm_map_entry_reserve_cpu_init().
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;
                z->zpagecount += z->zalloc;     /* Track total memory use */

                item = (void *)kmem_alloc3(kernel_map, nbytes,
                                           VM_SUBSYS_ZALLOC, KM_KRESERVE);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        atomic_add_long(&zone_kern_pages, z->zalloc);
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        } else {
                /*
                 * Otherwise allocate KVA from the kernel_map.
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;
                z->zpagecount += z->zalloc;     /* Track total memory use */

                item = (void *)kmem_alloc3(kernel_map, nbytes,
                                           VM_SUBSYS_ZALLOC, 0);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        atomic_add_long(&zone_kern_pages, z->zalloc);
                        bzero(item, nbytes);

                        if (z->zflags & ZONE_DESTROYABLE) {
                                if (z->zkmcur == z->zkmmax) {
                                        z->zkmmax =
                                            z->zkmmax == 0 ? 1 : z->zkmmax * 2;
                                        z->zkmvec = krealloc(z->zkmvec,
                                            z->zkmmax * sizeof(z->zkmvec[0]),
                                            M_ZONE, M_WAITOK);
                                }
                                z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
                        }
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        }

        /*
         * Enter any new pages into the pool, reserving one, or get the
         * item from the existing pool.
         */
        spin_lock(&z->zspin);

        /*
         * The zone code may need to allocate kernel memory, which can
         * recurse zget() infinitely if we do not handle it properly.
         * We deal with this by directly repopulating the pcpu vm_map_entry
         * cache.
         */
        if (nitems > 1 && (z->zflags & ZONE_SPECIAL)) {
                struct globaldata *gd = mycpu;
                vm_map_entry_t entry;

                /*
                 * Make sure we have enough structures in gd_vme_base to handle
                 * the reservation request.
                 *
                 * The critical section protects access to the per-cpu gd.
                 */
                crit_enter();
                while (gd->gd_vme_avail < 2 && nitems > 1) {
                        entry = item;
                        MAPENT_FREELIST(entry) = gd->gd_vme_base;
                        gd->gd_vme_base = entry;
                        atomic_add_int(&gd->gd_vme_avail, 1);
                        item = (uint8_t *)item + z->zsize;
                        --nitems;
                }
                crit_exit();
        }

        /*
         * Enter pages into the pool saving one for immediate
         * allocation.
         */
        if (nitems != 0) {
                nitems -= 1;
                for (i = 0; i < nitems; i++) {
                        ((void **)item)[0] = z->zitems;
                        ((void **)item)[1] = (void *)ZENTRY_FREE;
                        z->zitems = item;
                        item = (uint8_t *)item + z->zsize;
                }
                z->zfreecnt += nitems;
        } else if (z->zfreecnt > 0) {
                /*
                 * Get an item from the existing pool.
                 */
                item = z->zitems;
                z->zitems = ((void **)item)[0];
                if (((void **)item)[1] != (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **)item)[1] = NULL;
                z->zfreecnt--;
        } else {
                /*
                 * No items available.
                 */
                item = NULL;
        }
        spin_unlock(&z->zspin);

        /*
         * Release the per-zone global lock after the items have been
         * added.  Any other threads blocked in zget()'s zgetlk will
         * then retry rather than potentially exhaust the per-cpu cache
         * of vm_map_entry structures doing their own kmem_alloc() calls,
         * or allocating excessive amounts of space unnecessarily.
         */
        if ((z->zflags & ZONE_INTERRUPT) == 0)
                lockmgr(&z->zgetlk, LK_RELEASE);

        return (item);
}

static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
        vm_zone_t curzone;
        char tmpbuf[128];
        char tmpname[14];
        long freecnt;
        long znalloc;
        size_t len;
        size_t i;
        int offset;
        int error;
        int n;

        ksnprintf(tmpbuf, sizeof(tmpbuf),
                  "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
        error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
        if (error)
                return (error);

        lwkt_gettoken(&vm_token);
        LIST_FOREACH(curzone, &zlist, zlink) {
                len = strlen(curzone->zname);
                if (len >= (sizeof(tmpname) - 1))
                        len = (sizeof(tmpname) - 1);
                for (i = 0; i < sizeof(tmpname) - 1; i++)
                        tmpname[i] = ' ';
                tmpname[i] = 0;
                memcpy(tmpname, curzone->zname, len);
                tmpname[len] = ':';
                offset = 0;
                if (curzone == LIST_FIRST(&zlist)) {
                        offset = 1;
                        tmpbuf[0] = '\n';
                }
                freecnt = curzone->zfreecnt;
                znalloc = curzone->znalloc;
                for (n = 0; n < ncpus; ++n) {
                        freecnt += curzone->zpcpu[n].zfreecnt;
                        znalloc += curzone->zpcpu[n].znalloc;
                }

                ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
                          "%s %6.6lu, %8.8lu, %6.6lu, %6.6lu, %8.8lu\n",
                          tmpname, curzone->zsize, curzone->zmax,
                          (curzone->ztotal - freecnt),
                          freecnt, znalloc);

                len = strlen((char *)tmpbuf);
                if (LIST_NEXT(curzone, zlink) == NULL)
                        tmpbuf[len - 1] = 0;

                error = SYSCTL_OUT(req, tmpbuf, len);
                if (error)
                        break;
        }
        lwkt_reltoken(&vm_token);
        return (error);
}

#if defined(INVARIANTS)

static void
zerror(int error)
{
        char *msg;

        switch (error) {
        case ZONE_ERROR_INVALID:
                msg = "zone: invalid zone";
                break;
        case ZONE_ERROR_NOTFREE:
                msg = "zone: entry not free";
                break;
        case ZONE_ERROR_ALREADYFREE:
                msg = "zone: freeing free entry";
                break;
        default:
                msg = "zone: invalid error";
                break;
        }
        panic("%s", msg);
}

#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING | CTLFLAG_RD,
           NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_pages, CTLFLAG_RD, &zone_kmem_pages, 0,
            "Number of interrupt safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_burst, CTLFLAG_RW, &zone_burst, 0,
            "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace, CTLFLAG_RD,
            &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kern_pages, CTLFLAG_RD, &zone_kern_pages, 0,
            "Number of non-interrupt safe pages allocated by zone");