/*-
 * Copyright (c) 1991 Regents of the University of California.
 *	All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- A page queue lock is required when adding or removing a page from a
 *	  page queue regardless of other locks or the busy state of a page.
 *
 *		* In general, no thread besides the page daemon can acquire or
 *		  hold more than one page queue lock at a time.
 *
 *		* The page daemon can acquire and hold any pair of page queue
 *		  locks in any order.
 *
 *	- The object lock is required when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 */
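
/*
 * Illustrative sketch (not part of the original file): one sequence that
 * respects the rules above when activating a resident page.  The calls are
 * the real KPI, but the fragment is a simplified, hypothetical caller.
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_lookup(object, pindex);	// object lock covers the lookup
 *	vm_page_lock(m);			// page lock before queue changes
 *	vm_page_activate(m);			// takes the page queue lock itself
 *	vm_page_unlock(m);
 *	VM_OBJECT_WUNLOCK(object);
 */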

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with page of user-allocatable memory is a
 *	page structure.
 */

struct vm_domain vm_dom[MAXMEMDOM];
struct mtx_padalign vm_page_queue_free_mtx;
struct mtx_padalign pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array;
long vm_page_array_size;
int vm_page_zero_count;

static int boot_pages = UMA_BOOT_PAGES;
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &boot_pages, 0,
    "number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

/* Is the page daemon waiting for free pages? */
static int vm_pageout_pages_needed;

static uma_zone_t fakepg_zone;

static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_cache_turn_free(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(uint8_t queue, vm_page_t m);
static void vm_page_free_wakeup(void);
static void vm_page_init_fakepg(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
    vm_paddr_t high);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

vm_page_init_fakepg(void *dummy)

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
CTASSERT(sizeof(u_long) >= 8);
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)

	PA_LOCK_ASSERT(lockpa, MA_OWNED);
	if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))

	atomic_add_int(&pa_tryrelock_restart, 1);
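
/*
 * Illustrative sketch (not part of the original file): callers that hold a
 * pmap lock retry their lookup whenever this function had to drop and
 * reacquire locks ("locked_pa" is a caller-local vm_paddr_t, initially 0):
 *
 *	retry:
 *		pa = pmap_extract(pmap, va);
 *		if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *			goto retry;
 */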

/*
 * Sets the page size, perhaps based upon the memory
 * size.  Must be called before any use of page-size
 * dependent functions.
 */
vm_set_page_size(void)

	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

/*
 *	vm_page_blacklist_next:
 *
 *	Find the next entry in the provided string of blacklist
 *	addresses.  Entries are separated by space, comma, or newline.
 *	If an invalid integer is encountered then the rest of the
 *	string is skipped.  Updates the list pointer to the next
 *	character, or NULL if the string is exhausted or invalid.
 */
vm_page_blacklist_next(char **list, char *end)

	if (list == NULL || *list == NULL)

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end */
	if (*end == '\n' || *end == ' ' || *end == ',')
	printf("Blacklist not terminated, skipping\n");

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
		if (*cp == '\0' || ++cp >= end)
		return (trunc_page(bad));
	printf("Garbage in RAM blacklist, skipping\n");

/*
 *	vm_page_blacklist_check:
 *
 *	Iterate through the provided string of blacklist addresses, pulling
 *	each entry out of the physical allocator free list and putting it
 *	onto a list for reporting via the vm.page_blacklist sysctl.
 */
vm_page_blacklist_check(char *list, char *end)

	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
		m = vm_phys_paddr_to_vm_page(pa);
		mtx_lock(&vm_page_queue_free_mtx);
		ret = vm_phys_unfree_page(m);
		mtx_unlock(&vm_page_queue_free_mtx);
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		printf("Skipping page with pa 0x%jx\n",
		    (uintmax_t)pa);

/*
 *	vm_page_blacklist_load:
 *
 *	Search for a special module named "ram_blacklist".  It'll be a
 *	plain text file provided by the user via the loader directive
 *	of the same name.
 */
vm_page_blacklist_load(char **list, char **end)

	mod = preload_search_by_type("ram_blacklist");
	ptr = preload_fetch_addr(mod);
	len = preload_fetch_size(mod);

sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)

	error = sysctl_wire_old_buffer(req, 0);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
	error = sbuf_finish(&sbuf);

vm_page_domain_init(struct vm_domain *vmd)

	struct vm_pagequeue *pq;

	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
	    &vm_cnt.v_inactive_count;
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
	    &vm_cnt.v_active_count;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_oom = FALSE;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);

/*
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_page_startup(vm_offset_t vaddr)

	vm_paddr_t page_range;
	char *list, *listend;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;

	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; i < vm_phys_nsegs; i++) {
		if (vm_phys_segs[i].start < low_water)
			low_water = vm_phys_segs[i].start;
		if (vm_phys_segs[i].end > high_water)
			high_water = vm_phys_segs[i].end;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];

	end = phys_avail[biggestone+1];
	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(&vm_dom[i]);

	/*
	 * Almost all of the pages needed for boot strapping UMA are used
	 * for zone structures, so if the number of CPUs results in those
	 * structures taking more than one page each, we set aside more pages
	 * in proportion to the zone structure size.
	 */
	pages_per_zone = howmany(sizeof(struct uma_zone) +
	    sizeof(struct uma_cache) * (mp_maxid + 1), UMA_SLAB_SIZE);
	if (pages_per_zone > 1) {
		/* Reserve more pages so that we don't run out. */
		boot_pages = UMA_BOOT_PAGES_ZONES * pages_per_zone;

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.
	 *
	 * CTLFLAG_RDTUN doesn't work during the early boot process, so we must
	 * manually fetch the value.
	 */
	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);
#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
    defined(__i386__) || defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);

	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	for (i = 0; i < vm_phys_nsegs; i++) {
		page_range += atop(vm_phys_segs[i].end -
		    vm_phys_segs[i].start);
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on arm64, amd64, and mips can come out of the direct-map,
	 * not kvm like i386, so the pages must be tracked for a crashdump to
	 * include this data.  This includes the vm_page_array and the early
	 * UMA bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Add physical memory segments corresponding to the available
	 * physical memory.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	vm_cnt.v_page_count = 0;
	vm_cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			vm_phys_add_page(pa);

	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);

#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */

vm_page_reference(vm_page_t m)

	vm_page_aflag_set(m, PGA_REFERENCED);

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
vm_page_busy_downgrade(vm_page_t m)

	vm_page_assert_xbusied(m);

	x &= VPB_BIT_WAITERS;
	if (atomic_cmpset_rel_int(&m->busy_lock,
	    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1) | x))

/*
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
vm_page_sbusied(vm_page_t m)

	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);

/*
 *	Shared unbusy a page.
 */
vm_page_sunbusy(vm_page_t m)

	vm_page_assert_sbusied(m);

	if (VPB_SHARERS(x) > 1) {
		if (atomic_cmpset_int(&m->busy_lock, x,
		    x - VPB_ONE_SHARER))
	if ((x & VPB_BIT_WAITERS) == 0) {
		KASSERT(x == VPB_SHARERS_WORD(1),
		    ("vm_page_sunbusy: invalid lock state"));
		if (atomic_cmpset_int(&m->busy_lock,
		    VPB_SHARERS_WORD(1), VPB_UNBUSIED))
	KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
	    ("vm_page_sunbusy: invalid lock state for waiters"));
	if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep and release the page lock, using the page pointer as wchan.
 *	This is used to implement the hard-path of busying mechanism.
 *
 *	The given page must be locked.
 */
vm_page_busy_sleep(vm_page_t m, const char *wmesg)

	vm_page_lock_assert(m, MA_OWNED);

	if (x == VPB_UNBUSIED) {
	if ((x & VPB_BIT_WAITERS) == 0 &&
	    !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS)) {
	msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);

/*
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
vm_page_trysbusy(vm_page_t m)

	if ((x & VPB_BIT_SHARED) == 0)
	if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
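
/*
 * Illustrative sketch (not part of the original file): a reader that only
 * needs to keep the page's identity and contents stable can use the shared
 * busy state, falling back to the sleep path on contention:
 *
 *	if (vm_page_trysbusy(m)) {
 *		...access the page...
 *		vm_page_sunbusy(m);
 *	} else {
 *		...lock the page and vm_page_busy_sleep(), then retry...
 *	}
 *
 * The surrounding object and page locking is omitted here; see the rules at
 * the top of this file.
 */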

vm_page_xunbusy_locked(vm_page_t m)

	vm_page_assert_xbusied(m);
	vm_page_assert_locked(m);

	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	/* There is a waiter, do wakeup() instead of vm_page_flash(). */

vm_page_xunbusy_maybelocked(vm_page_t m)

	vm_page_assert_xbusied(m);

	/*
	 * Fast path for unbusy.  If it succeeds, we know that there
	 * are no waiters, so we do not need a wakeup.
	 */
	if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
	    VPB_UNBUSIED))

	lockacq = !mtx_owned(vm_page_lockptr(m));
	vm_page_xunbusy_locked(m);

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called when the first attempt to exclusively unbusy a page has failed.
 *	It is assumed that the waiters bit is on.
 */
vm_page_xunbusy_hard(vm_page_t m)

	vm_page_assert_xbusied(m);
	vm_page_xunbusy_locked(m);

/*
 *	Wakeup anyone waiting for the page.
 *	The ownership bits do not change.
 *
 *	The given page must be locked.
 */
vm_page_flash(vm_page_t m)

	vm_page_lock_assert(m, MA_OWNED);

	if ((x & VPB_BIT_WAITERS) == 0)
	if (atomic_cmpset_int(&m->busy_lock, x,
	    x & (~VPB_BIT_WAITERS)))

/*
 * Keep the page from being freed by the page daemon; this has much the
 * same effect as wiring, except at much lower overhead, and should be
 * used only for *very* temporary holding ("wiring").
 */
vm_page_hold(vm_page_t mem)

	vm_page_lock_assert(mem, MA_OWNED);

vm_page_unhold(vm_page_t mem)

	vm_page_lock_assert(mem, MA_OWNED);
	KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));

	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
		vm_page_free_toq(mem);
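
/*
 * Illustrative sketch (not part of the original file): hold/unhold brackets
 * a short window in which the page must not be freed, e.g. while it is used
 * after the object lock has been dropped:
 *
 *	vm_page_lock(m);
 *	vm_page_hold(m);
 *	vm_page_unlock(m);
 *	...brief use of the page without the object lock...
 *	vm_page_lock(m);
 *	vm_page_unhold(m);
 *	vm_page_unlock(m);
 *
 * Anything longer-lived should use wiring instead.
 */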

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
vm_page_unhold_pages(vm_page_t *ma, int count)

	struct mtx *mtx, *new_mtx;

	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {

PHYS_TO_VM_PAGE(vm_paddr_t pa)

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
#elif defined(VM_PHYSSEG_DENSE)
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

/*
 * Create a fictitious page with the specified physical address and
 * memory attribute.  The memory attribute is the only machine-
 * dependent aspect of a fictitious page that must be initialized.
 */
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);

vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
	m->phys_addr = paddr;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	pmap_page_set_memattr(m, memattr);

/*
 *	Release a fictitious page.
 */
vm_page_putfake(vm_page_t m)

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
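
/*
 * Illustrative sketch (not part of the original file): fictitious pages are
 * typically used by device pagers to expose memory that has no entry in
 * vm_page_array[].  A hypothetical fragment:
 *
 *	m = vm_page_getfake(dev_paddr, VM_MEMATTR_DEFAULT);
 *	...hand m to the faulting code for an OBJT_DEVICE object...
 *	vm_page_putfake(m);	// when the mapping is torn down
 *
 * "dev_paddr" is a made-up physical address; a real driver would often pass
 * a machine-dependent uncacheable memory attribute instead of
 * VM_MEMATTR_DEFAULT.
 */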

vm_page_free(vm_page_t m)

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
vm_page_free_zero(vm_page_t m)

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);

/*
 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
 * array which was optionally read ahead or behind.
 */
vm_page_readahead_finish(vm_page_t m)

	/* We shouldn't put invalid pages on queues. */
	KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m));

	/*
	 * Since the page is not the actually needed one, whether it should
	 * be activated or deactivated is not obvious.  Empirical results
	 * have shown that deactivating the page is usually the best choice,
	 * unless the page is wanted by another thread.
	 */
	if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
		vm_page_activate(m);
	else
		vm_page_deactivate(m);

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if the page is busied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and object containing it must
 *	be locked.
 */
vm_page_sleep_if_busy(vm_page_t m, const char *msg)

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	if (vm_page_busied(m)) {
		/*
		 * The page-specific object must be cached because page
		 * identity can change during the sleep, causing the
		 * re-lock of a different object.
		 * It is assumed that a reference to the object is already
		 * held by the callers.
		 */
		VM_OBJECT_WUNLOCK(obj);
		vm_page_busy_sleep(m, msg);
		VM_OBJECT_WLOCK(obj);
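
/*
 * Illustrative sketch (not part of the original file): because the object
 * lock is dropped while sleeping, callers normally re-look the page up after
 * vm_page_sleep_if_busy() returns TRUE:
 *
 *	VM_OBJECT_WLOCK(object);
 *	for (;;) {
 *		m = vm_page_lookup(object, pindex);
 *		if (m == NULL || !vm_page_sleep_if_busy(m, "pgbusy"))
 *			break;
 *	}
 *
 * The wait message "pgbusy" is arbitrary.
 */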

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
vm_page_dirty_KBI(vm_page_t m)

	/* These assertions refer to this operation by its public name. */
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	msucc = TAILQ_FIRST(&object->memq);
	KASSERT(msucc->pindex > pindex,
	    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page
	 */

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {

	vm_page_insert_radixdone(m, object, mpred);

/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);

/*
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object must be locked.  The page must be locked if it is managed.
 */
vm_page_remove(vm_page_t m)

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_assert_locked(m);
	if ((object = m->object) == NULL)
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (vm_page_xbusied(m))
		vm_page_xunbusy_maybelocked(m);

	/*
	 * Now remove from the object's list of backed pages.
	 */
	vm_radix_remove(&object->rtree, m->pindex);
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);

/*
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);

/*
 *	Returns the given page's successor (by pindex) within the object if it is
 *	resident; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_next(vm_page_t m)

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)

/*
 *	Returns the given page's predecessor (by pindex) within the object if it is
 *	resident; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_prev(vm_page_t m)

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->pindex != m->pindex - 1)
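
/*
 * Illustrative sketch (not part of the original file): vm_page_find_least()
 * and the sorted memq make it easy to walk the resident pages of a pindex
 * range ("start" and "end" are hypothetical bounds):
 *
 *	VM_OBJECT_RLOCK(object);
 *	for (m = vm_page_find_least(object, start);
 *	    m != NULL && m->pindex < end; m = TAILQ_NEXT(m, listq)) {
 *		...examine the resident page...
 *	}
 *	VM_OBJECT_RUNLOCK(object);
 */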

/*
 *	Uses the page mnew as a replacement for an existing page at index
 *	pindex which must be already present in the object.
 *
 *	The existing page must not be on a paging queue.
 */
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(mnew->object == NULL,
	    ("vm_page_replace: page already in object"));

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */
	mnew->object = object;
	mnew->pindex = pindex;
	mold = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mold->queue == PQ_NONE,
	    ("vm_page_replace: mold is on a paging queue"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);

	mold->object = NULL;
	vm_page_xunbusy_maybelocked(mold);

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another, but OBJ_MIGHTBEDIRTY may still need
	 * to be set.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);

/*
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 *
 *	The objects must be locked.
 */
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on m_prev and can cheat on the implementation aspects of the
	 * function.
	 */
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;

	vm_page_insert_radixdone(m, new_object, mpred);

/*
 *	Convert all of the given object's cached pages that have a
 *	pindex within the given range into free pages.  If the value
 *	zero is given for "end", then the range's upper bound is
 *	infinity.  If the given object is backed by a vnode and it
 *	transitions from having one or more cached pages to none, the
 *	vnode's hold count is reduced.
 */
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(vm_radix_is_empty(&object->cache))) {
		mtx_unlock(&vm_page_queue_free_mtx);
	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
		if (end != 0 && m->pindex >= end)
		vm_radix_remove(&object->cache, m->pindex);
		vm_page_cache_turn_free(m);
	empty = vm_radix_is_empty(&object->cache);
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);

/*
 *	Returns the cached page that is associated with the given
 *	object and offset.  If, however, none exists, returns NULL.
 *
 *	The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	return (vm_radix_lookup(&object->cache, pindex));

/*
 *	Remove the given cached page from its containing object's
 *	collection of cached pages.
 *
 *	The free page queue must be locked.
 */
vm_page_cache_remove(vm_page_t m)

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	vm_radix_remove(&m->object->cache, m->pindex);
	vm_cnt.v_cache_count--;

/*
 *	Transfer all of the cached pages with offset greater than or
 *	equal to 'offidxstart' from the original object's cache to the
 *	new object's cache.  However, any cached pages with offset
 *	greater than or equal to the new object's size are kept in the
 *	original object.  Initially, the new object's cache must be
 *	empty.  Offset 'offidxstart' in the original object must
 *	correspond to offset zero in the new object.
 *
 *	The new object must be locked.
 */
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_ASSERT_WLOCKED(new_object);
	KASSERT(vm_radix_is_empty(&new_object->cache),
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	while ((m = vm_radix_lookup_ge(&orig_object->cache,
	    offidxstart)) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		if ((m->pindex - offidxstart) >= new_object->size)
		vm_radix_remove(&orig_object->cache, m->pindex);
		/* Update the page's object and offset. */
		m->object = new_object;
		m->pindex -= offidxstart;
		if (vm_radix_insert(&new_object->cache, m))
			vm_page_cache_turn_free(m);
	mtx_unlock(&vm_page_queue_free_mtx);

/*
 *	Returns TRUE if a cached page is associated with the given object and
 *	offset, and FALSE otherwise.
 *
 *	The object must be locked.
 */
vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)

	/*
	 * Insertion into an object's collection of cached pages requires the
	 * object to be locked.  Therefore, if the object is locked and the
	 * object's collection is empty, there is no need to acquire the free
	 * page queues lock in order to prove that the specified page doesn't
	 * exist.
	 */
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (__predict_true(vm_object_cache_is_empty(object)))
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_page_cache_lookup(object, pindex);
	mtx_unlock(&vm_page_queue_free_mtx);

/*
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_IFCACHED	return page only if it is cached
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)

	struct vnode *vp = NULL;
	vm_object_t m_object;
	int flags, req_class;

	mpred = 0;	/* XXX: pacify gcc */
	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
	    req));
	VM_OBJECT_ASSERT_WLOCKED(object);

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	if (object != NULL) {
		mpred = vm_radix_lookup_le(&object->rtree, pindex);
		KASSERT(mpred == NULL || mpred->pindex != pindex,
		    ("vm_page_alloc: pindex already allocated"));

	/*
	 * The page allocation request can come from consumers which already
	 * hold the free page queue mutex, like vm_page_insert() in
	 * vm_page_cache().
	 */
	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || (object->flags & (OBJ_COLORED |
		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();

	/*
	 * At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(!vm_page_sbusied(m),
	    ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			vm_cnt.v_reactivated++;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE &&
		    vm_object_cache_is_empty(m_object))
			vp = m_object->handle;
	KASSERT(m->valid == 0,
	    ("vm_page_alloc: free page %p is valid", m));
	vm_phys_freecnt_adj(m, -1);
	if ((m->flags & PG_ZERO) != 0)
		vm_page_zero_count--;
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	if ((req & VM_ALLOC_ZERO) != 0)
	if ((req & VM_ALLOC_NODUMP) != 0)
	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
	    VPO_UNMANAGED : 0;
	m->busy_lock = VPB_UNBUSIED;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	if ((req & VM_ALLOC_SBUSY) != 0)
		m->busy_lock = VPB_SHARERS_WORD(1);
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&vm_cnt.v_wire_count, 1);

	if (object != NULL) {
		if (vm_page_insert_after(m, object, pindex, mpred)) {
			/* See the comment below about hold count. */
			pagedaemon_wakeup();
			if (req & VM_ALLOC_WIRED) {
				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
			m->oflags = VPO_UNMANAGED;
			m->busy_lock = VPB_UNBUSIED;

		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    (object->flags & OBJ_FICTITIOUS) == 0)
			pmap_page_set_memattr(m, object->memattr);

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();
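
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * retries after waiting for the page daemon when no page can be allocated,
 * and only relies on VM_ALLOC_ZERO as a hint:
 *
 *	VM_OBJECT_WLOCK(object);
 *	while ((m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_WLOCK(object);
 *	}
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */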

vm_page_alloc_contig_vdrop(struct spglist *lst)

	while (!SLIST_EMPTY(lst)) {
		vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
		SLIST_REMOVE_HEAD(lst, plinks.s.ss);

/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.  Both "alignment"
 *	and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)

	struct spglist deferred_vdrop_list;
	vm_page_t m, m_tmp, m_ret;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
	    req));
	if (object != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(object);
		KASSERT(object->type == OBJT_PHYS,
		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
		    object));
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	SLIST_INIT(&deferred_vdrop_list);
	mtx_lock(&vm_page_queue_free_mtx);
	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
	    vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
	    vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages)) {
#if VM_NRESERVLEVEL > 0
		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
		    low, high, alignment, boundary)) == NULL)
			m_ret = vm_phys_alloc_contig(npages, low, high,
			    alignment, boundary);
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, npages);
		pagedaemon_wakeup();

	for (m = m_ret; m < &m_ret[npages]; m++) {
		drop = vm_page_alloc_init(m);
		/*
		 * Enqueue the vnode for deferred vdrop().
		 */
		m->plinks.s.pv = drop;
		SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
		    plinks.s.ss);
#if VM_NRESERVLEVEL > 0
	if (vm_reserv_reclaim_contig(npages, low, high, alignment,
	    boundary))
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
	 */
	if ((req & VM_ALLOC_ZERO) != 0)
	if ((req & VM_ALLOC_NODUMP) != 0)
	if ((req & VM_ALLOC_WIRED) != 0)
		atomic_add_int(&vm_cnt.v_wire_count, npages);
	if (object != NULL) {
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    memattr == VM_MEMATTR_DEFAULT)
			memattr = object->memattr;

	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->flags = (m->flags | PG_NODUMP) & flags;
		m->busy_lock = VPB_UNBUSIED;
		if (object != NULL) {
			if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
				m->busy_lock = VPB_SINGLE_EXCLUSIVER;
			if ((req & VM_ALLOC_SBUSY) != 0)
				m->busy_lock = VPB_SHARERS_WORD(1);
		if ((req & VM_ALLOC_WIRED) != 0)
		/* Unmanaged pages don't use "act_count". */
		m->oflags = VPO_UNMANAGED;
		if (object != NULL) {
			if (vm_page_insert(m, object, pindex)) {
				vm_page_alloc_contig_vdrop(
				    &deferred_vdrop_list);
				if (vm_paging_needed())
					pagedaemon_wakeup();
				if ((req & VM_ALLOC_WIRED) != 0)
					atomic_subtract_int(&vm_cnt.v_wire_count,
					    npages);
				for (m_tmp = m, m = m_ret;
				    m < &m_ret[npages]; m++) {
					if ((req & VM_ALLOC_WIRED) != 0)
					m->oflags |= VPO_UNMANAGED;
					m->busy_lock = VPB_UNBUSIED;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
	vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
	if (vm_paging_needed())
		pagedaemon_wakeup();
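
/*
 * Illustrative sketch (not part of the original file): allocating a wired,
 * physically contiguous scratch buffer with no owning object.  The constraint
 * values (low = 0, high = ~0, alignment = PAGE_SIZE, boundary = 0) are
 * examples only:
 *
 *	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages, 0, ~(vm_paddr_t)0,
 *	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 *	if (m == NULL)
 *		...wait for free pages or fall back to smaller requests...
 */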

/*
 * Initialize a page that has been freshly dequeued from a freelist.
 * The caller has to drop the vnode returned, if it is not NULL.
 *
 * This function may only be used to initialize unmanaged pages.
 *
 * To be called with vm_page_queue_free_mtx held.
 */
static struct vnode *
vm_page_alloc_init(vm_page_t m)

	vm_object_t m_object;

	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc_init: page %p has unexpected queue %d",
	    m, m->queue));
	KASSERT(m->wire_count == 0,
	    ("vm_page_alloc_init: page %p is wired", m));
	KASSERT(m->hold_count == 0,
	    ("vm_page_alloc_init: page %p is held", m));
	KASSERT(!vm_page_sbusied(m),
	    ("vm_page_alloc_init: page %p is busy", m));
	KASSERT(m->dirty == 0,
	    ("vm_page_alloc_init: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE &&
		    vm_object_cache_is_empty(m_object))
			drop = m_object->handle;
	KASSERT(m->valid == 0,
	    ("vm_page_alloc_init: free page %p is valid", m));
	vm_phys_freecnt_adj(m, -1);
	if ((m->flags & PG_ZERO) != 0)
		vm_page_zero_count--;

/*
 *	vm_page_alloc_freelist:
 *
 *	Allocate a physical page from the specified free page list.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_alloc_freelist(int flind, int req)

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	/*
	 * Do not allocate reserved pages unless the req has asked for it.
	 */
	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0))
		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
	atomic_add_int(&vm_pageout_deficit,
	    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
	pagedaemon_wakeup();
	mtx_unlock(&vm_page_queue_free_mtx);

	drop = vm_page_alloc_init(m);
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	if ((req & VM_ALLOC_ZERO) != 0)
	if ((req & VM_ALLOC_WIRED) != 0) {
		/*
		 * The page lock is not required for wiring a page that does
		 * not belong to an object.
		 */
		atomic_add_int(&vm_cnt.v_wire_count, 1);

	/* Unmanaged pages don't use "act_count". */
	m->oflags = VPO_UNMANAGED;
	if (vm_paging_needed())
		pagedaemon_wakeup();

#define	VPSC_ANY	0	/* No restrictions. */
#define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
#define	VPSC_NOSUPER	2	/* Skip superpages. */
2142 * vm_page_scan_contig:
2144 * Scan vm_page_array[] between the specified entries "m_start" and
2145 * "m_end" for a run of contiguous physical pages that satisfy the
2146 * specified conditions, and return the lowest page in the run. The
2147 * specified "alignment" determines the alignment of the lowest physical
2148 * page in the run. If the specified "boundary" is non-zero, then the
 * run of physical pages cannot span a physical address that is a
 * multiple of "boundary".
 *
 * "m_end" is never dereferenced, so it need not point to a vm_page
 * structure within vm_page_array[].
 *
 * "npages" must be greater than zero.  "m_start" and "m_end" must not
 * span a hole (or discontiguity) in the physical address space.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
    u_long alignment, vm_paddr_t boundary, int options)
	struct mtx *m_mtx, *new_mtx;
#if VM_NRESERVLEVEL > 0
	int level;
#endif
	int m_inc, order, run_ext, run_len;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));

		/*
		 * If the current page would be the start of a run, check its
		 * physical address against the end, alignment, and boundary
		 * conditions.  If it doesn't satisfy these conditions, either
		 * terminate the scan or advance to the next page that
		 * satisfies the failed condition.
		 */
			KASSERT(m_run == NULL, ("m_run != NULL"));
			if (m + npages > m_end)
			pa = VM_PAGE_TO_PHYS(m);
			if ((pa & (alignment - 1)) != 0) {
				m_inc = atop(roundup2(pa, alignment) - pa);
			if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
			    boundary) != 0) {
				m_inc = atop(roundup2(pa, boundary) - pa);
			KASSERT(m_run != NULL, ("m_run == NULL"));

		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(m);
		if (m_mtx != new_mtx) {
		if (m->wire_count != 0 || m->hold_count != 0)
#if VM_NRESERVLEVEL > 0
		else if ((level = vm_reserv_level(m)) >= 0 &&
		    (options & VPSC_NORESERV) != 0) {
			/* Advance to the end of the reservation. */
			pa = VM_PAGE_TO_PHYS(m);
			m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
			    pa);
		else if ((object = m->object) != NULL) {
			/*
			 * The page is considered eligible for relocation if
			 * and only if it could be laundered or reclaimed by
			 * the page daemon.
			 */
			if (!VM_OBJECT_TRYRLOCK(object)) {
				VM_OBJECT_RLOCK(object);
			if (m->object != object) {
				/*
				 * The page may have been freed.
				 */
				VM_OBJECT_RUNLOCK(object);
			} else if (m->wire_count != 0 ||
			    m->hold_count != 0) {
			KASSERT((m->flags & PG_UNHOLDFREE) == 0,
			    ("page %p is PG_UNHOLDFREE", m));
			/* Don't care: PG_NODUMP, PG_WINATCFLS, PG_ZERO. */
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE)
			else if ((m->flags & PG_CACHED) != 0 ||
			    m != vm_page_lookup(object, m->pindex)) {
				/*
				 * The page is cached or recently converted
				 * from cached to free.
				 */
#if VM_NRESERVLEVEL > 0
				/*
				 * The page is reserved.  Extend the
				 * current run by one page.
				 */
			} else if ((order = m->order) < VM_NFREEORDER) {
				/*
				 * The page is enqueued in the
				 * physical memory allocator's cache/
				 * free page queues.  Moreover, it is
				 * the first page in a power-of-two-
				 * sized run of contiguous cache/free
				 * pages.  Add these pages to the end
				 * of the current run, and jump
				 * ahead.
				 */
				run_ext = 1 << order;
#if VM_NRESERVLEVEL > 0
		} else if ((options & VPSC_NOSUPER) != 0 &&
		    (level = vm_reserv_level_iffullpop(m)) >= 0) {
			/* Advance to the end of the superpage. */
			pa = VM_PAGE_TO_PHYS(m);
			m_inc = atop(roundup2(pa + 1,
			    vm_reserv_size(level)) - pa);
		} else if (object->memattr == VM_MEMATTR_DEFAULT &&
		    m->queue != PQ_NONE && !vm_page_busied(m)) {
			/*
			 * The page is allocated but eligible for
			 * relocation.  Extend the current run by one
			 * page.
			 */
			KASSERT(pmap_page_get_memattr(m) ==
			    VM_MEMATTR_DEFAULT,
			    ("page %p has an unexpected memattr", m));
			KASSERT((m->oflags & (VPO_SWAPINPROG |
			    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
			    ("page %p has unexpected oflags", m));
			/* Don't care: VPO_NOSYNC. */
			VM_OBJECT_RUNLOCK(object);
#if VM_NRESERVLEVEL > 0
		} else if (level >= 0) {
			/*
			 * The page is reserved but not yet allocated.  In
			 * other words, it is still cached or free.  Extend
			 * the current run by one page.
			 */
		} else if ((order = m->order) < VM_NFREEORDER) {
			/*
			 * The page is enqueued in the physical memory
			 * allocator's cache/free page queues.  Moreover, it
			 * is the first page in a power-of-two-sized run of
			 * contiguous cache/free pages.  Add these pages to
			 * the end of the current run, and jump ahead.
			 */
			run_ext = 1 << order;
			/*
			 * Skip the page for one of the following reasons: (1)
			 * It is enqueued in the physical memory allocator's
			 * cache/free page queues.  However, it is not the
			 * first page in a run of contiguous cache/free pages.
			 * (This case rarely occurs because the scan is
			 * performed in ascending order.) (2) It is not
			 * reserved, and it is transitioning from free to
			 * allocated.  (Conversely, the transition from
			 * allocated to free for managed pages is blocked by
			 * the page lock.) (3) It is allocated but not
			 * contained by an object and not wired, e.g.,
			 * allocated by Xen's balloon driver.
			 */

		/*
		 * Extend or reset the current run of pages.
		 */
	if (run_len >= npages)
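/*
 * Illustrative sketch (not part of the original source): a worked example of
 * the alignment and boundary arithmetic above, assuming 4 KB pages,
 * npages = 16 (64 KB), alignment = 0x10000, and boundary = 0x200000.
 *
 *	At pa = 0x123000, pa & (alignment - 1) = 0x3000, so the candidate start
 *	is misaligned and the scan advances by
 *	m_inc = atop(roundup2(0x123000, 0x10000) - 0x123000) = 13 pages,
 *	landing on 0x130000.
 *
 *	At pa = 0x1f9000, the run would end at 0x208fff; pa ^ (pa + ptoa(16) - 1)
 *	has bits set at and above the 2 MB boundary, so
 *	rounddown2(..., boundary) is nonzero and the scan advances by 7 pages
 *	to the next boundary multiple, 0x200000.
 */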
/*
 * vm_page_reclaim_run:
 *
 * Try to relocate each of the allocated virtual pages within the
 * specified run of physical pages to a new physical address.  Free the
 * physical pages underlying the relocated virtual pages.  A virtual page
 * is relocatable if and only if it could be laundered or reclaimed by
 * the page daemon.  Whenever possible, a virtual page is relocated to a
 * physical address above "high".
 *
 * Returns 0 if every physical page within the run was already free or
 * just freed by a successful relocation.  Otherwise, returns a non-zero
 * value indicating why the last attempt to relocate a virtual page was
 * unsuccessful.
 *
 * "req_class" must be an allocation class.
 */
vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
    vm_paddr_t high)
	struct mtx *m_mtx, *new_mtx;
	struct spglist free;
	vm_page_t m, m_end, m_new;
	int error, order, req;

	KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
	    ("req_class is not an allocation class"));
	m_end = m_run + npages;
	for (; error == 0 && m < m_end; m++) {
		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));

		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(m);
		if (m_mtx != new_mtx) {
		if (m->wire_count != 0 || m->hold_count != 0)
		else if ((object = m->object) != NULL) {
			/*
			 * The page is relocated if and only if it could be
			 * laundered or reclaimed by the page daemon.
			 */
			if (!VM_OBJECT_TRYWLOCK(object)) {
				VM_OBJECT_WLOCK(object);
			if (m->object != object) {
				/*
				 * The page may have been freed.
				 */
				VM_OBJECT_WUNLOCK(object);
			} else if (m->wire_count != 0 ||
			    m->hold_count != 0) {
			KASSERT((m->flags & PG_UNHOLDFREE) == 0,
			    ("page %p is PG_UNHOLDFREE", m));
			/* Don't care: PG_NODUMP, PG_WINATCFLS, PG_ZERO. */
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE)
			else if ((m->flags & PG_CACHED) != 0 ||
			    m != vm_page_lookup(object, m->pindex)) {
				/*
				 * The page is cached or recently converted
				 * from cached to free.
				 */
				VM_OBJECT_WUNLOCK(object);
			} else if (object->memattr != VM_MEMATTR_DEFAULT)
			else if (m->queue != PQ_NONE && !vm_page_busied(m)) {
				KASSERT(pmap_page_get_memattr(m) ==
				    VM_MEMATTR_DEFAULT,
				    ("page %p has an unexpected memattr", m));
				KASSERT((m->oflags & (VPO_SWAPINPROG |
				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
				    ("page %p has unexpected oflags", m));
				/* Don't care: VPO_NOSYNC. */
				if (m->valid != 0) {
					/*
					 * First, try to allocate a new page
					 * that is above "high".  Failing
					 * that, try to allocate a new page
					 * that is below "m_run".  Allocate
					 * the new page between the end of
					 * "m_run" and "high" only as a last
					 * resort.
					 */
					req = req_class | VM_ALLOC_NOOBJ;
					if ((m->flags & PG_NODUMP) != 0)
						req |= VM_ALLOC_NODUMP;
					if (trunc_page(high) !=
					    ~(vm_paddr_t)PAGE_MASK) {
						m_new = vm_page_alloc_contig(
						    VM_MEMATTR_DEFAULT);
					if (m_new == NULL) {
						pa = VM_PAGE_TO_PHYS(m_run);
						m_new = vm_page_alloc_contig(
						    0, pa - 1, PAGE_SIZE, 0,
						    VM_MEMATTR_DEFAULT);
					if (m_new == NULL) {
						m_new = vm_page_alloc_contig(
						    pa, high, PAGE_SIZE, 0,
						    VM_MEMATTR_DEFAULT);
					if (m_new == NULL) {
					KASSERT(m_new->wire_count == 0,
					    ("page %p is wired", m));

					/*
					 * Replace "m" with the new page.  For
					 * vm_page_replace(), "m" must be busy
					 * and dequeued.  Finally, change "m"
					 * as if vm_page_free() was called.
					 */
					if (object->ref_count != 0)
					m_new->aflags = m->aflags;
					KASSERT(m_new->oflags == VPO_UNMANAGED,
					    ("page %p is managed", m));
					m_new->oflags = m->oflags & VPO_NOSYNC;
					pmap_copy_page(m, m_new);
					m_new->valid = m->valid;
					m_new->dirty = m->dirty;
					m->flags &= ~PG_ZERO;
					vm_page_replace_checked(m_new, object,

					/*
					 * The new page must be deactivated
					 * before the object is unlocked.
					 */
					new_mtx = vm_page_lockptr(m_new);
					if (m_mtx != new_mtx) {
					vm_page_deactivate(m_new);
					m->flags &= ~PG_ZERO;
					KASSERT(m->dirty == 0,
					    ("page %p is dirty", m));
					SLIST_INSERT_HEAD(&free, m,
					    plinks.s.ss);
			VM_OBJECT_WUNLOCK(object);
			mtx_lock(&vm_page_queue_free_mtx);
			if (order < VM_NFREEORDER) {
				/*
				 * The page is enqueued in the physical memory
				 * allocator's cache/free page queues.
				 * Moreover, it is the first page in a power-
				 * of-two-sized run of contiguous cache/free
				 * pages.  Jump ahead to the last page within
				 * that run, and continue from there.
				 */
				m += (1 << order) - 1;
#if VM_NRESERVLEVEL > 0
			else if (vm_reserv_is_page_free(m))
			mtx_unlock(&vm_page_queue_free_mtx);
			if (order == VM_NFREEORDER)
	if ((m = SLIST_FIRST(&free)) != NULL) {
		mtx_lock(&vm_page_queue_free_mtx);
			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
			vm_phys_freecnt_adj(m, 1);
#if VM_NRESERVLEVEL > 0
			if (!vm_reserv_free_page(m))
				vm_phys_free_pages(m, 0);
		} while ((m = SLIST_FIRST(&free)) != NULL);
		vm_page_zero_idle_wakeup();
		vm_page_free_wakeup();
		mtx_unlock(&vm_page_queue_free_mtx);
CTASSERT(powerof2(NRUNS));

#define	RUN_INDEX(count)	((count) & (NRUNS - 1))

#define	MIN_RECLAIM	8
/*
 * vm_page_reclaim_contig:
 *
 * Reclaim allocated, contiguous physical memory satisfying the specified
 * conditions by relocating the virtual pages using that physical memory.
 * Returns true if reclamation is successful and false otherwise.  Since
 * relocation requires the allocation of physical pages, reclamation may
 * fail due to a shortage of cache/free pages.  When reclamation fails,
 * callers are expected to perform VM_WAIT before retrying a failed
 * allocation operation, e.g., vm_page_alloc_contig().
 *
 * The caller must always specify an allocation class through "req".
 *
 * allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 * The optional allocation flags are ignored.
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary"
 * must be a power of two.
 */
vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
	vm_paddr_t curr_low;
	vm_page_t m_run, m_runs[NRUNS];
	u_long count, reclaimed;
	int error, i, options, req_class;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	/*
	 * Return if the number of cached and free pages cannot satisfy the
	 * requested allocation.
	 */
	count = vm_cnt.v_free_count + vm_cnt.v_cache_count;
	if (count < npages + vm_cnt.v_free_reserved || (count < npages +
	    vm_cnt.v_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
	    (count < npages && req_class == VM_ALLOC_INTERRUPT))

	/*
	 * Scan up to three times, relaxing the restrictions ("options") on
	 * the reclamation of reservations and superpages each time.
	 */
	for (options = VPSC_NORESERV;;) {
		/*
		 * Find the highest runs that satisfy the given constraints
		 * and restrictions, and record them in "m_runs".
		 */
			m_run = vm_phys_scan_contig(npages, curr_low, high,
			    alignment, boundary, options);
			curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages);
			m_runs[RUN_INDEX(count)] = m_run;

		/*
		 * Reclaim the highest runs in LIFO (descending) order until
		 * the number of reclaimed pages, "reclaimed", is at least
		 * MIN_RECLAIM.  Reset "reclaimed" each time because each
		 * reclamation is idempotent, and runs will (likely) recur
		 * from one scan to the next as restrictions are relaxed.
		 */
		for (i = 0; count > 0 && i < NRUNS; i++) {
			m_run = m_runs[RUN_INDEX(count)];
			error = vm_page_reclaim_run(req_class, npages, m_run,
			    high);
				reclaimed += npages;
				if (reclaimed >= MIN_RECLAIM)

		/*
		 * Either relax the restrictions on the next scan or return if
		 * the last scan had no restrictions.
		 */
		if (options == VPSC_NORESERV)
			options = VPSC_NOSUPER;
		else if (options == VPSC_NOSUPER)
		else if (options == VPSC_ANY)
			return (reclaimed != 0);
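/*
 * Illustrative sketch (an assumption, not code from this file): the retry
 * loop that the header comment above describes for callers of
 * vm_page_alloc_contig().  The surrounding names ("obj", "pindex", and the
 * contiguity parameters) are placeholders; only the two vm_page functions
 * and VM_WAIT come from this file's interfaces.
 *
 *	for (;;) {
 *		m = vm_page_alloc_contig(obj, pindex, req, npages, low, high,
 *		    alignment, boundary, memattr);
 *		if (m != NULL)
 *			break;
 *		if (!vm_page_reclaim_contig(req, npages, low, high,
 *		    alignment, boundary))
 *			VM_WAIT;
 *	}
 */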
/*
 * vm_wait:	(also see VM_WAIT macro)
 *
 * Sleep until free pages are available for allocation.
 * - Called in various places before memory allocations.
 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
		    PDROP | PSWP, "VMWait", 0);
		if (!vm_pageout_wanted) {
			vm_pageout_wanted = true;
			wakeup(&vm_pageout_wanted);
		vm_pages_needed = true;
		msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,

/*
 * vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 * Sleep until free pages are available for allocation.
 * - Called only in vm_fault so that processes page faulting
 *   can be easily tracked.
 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *   processes will be able to grab memory first.  Do not change
 *   this balance without careful testing first.
 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pageout_wanted) {
		vm_pageout_wanted = true;
		wakeup(&vm_pageout_wanted);
	vm_pages_needed = true;
	msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
struct vm_pagequeue *
vm_page_pagequeue(vm_page_t m)

	return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]);

/*
 * Remove the given page from its current page queue.
 *
 * The page must be locked.
 */
vm_page_dequeue(vm_page_t m)
	struct vm_pagequeue *pq;

	vm_page_assert_locked(m);
	KASSERT(m->queue < PQ_COUNT, ("vm_page_dequeue: page %p is not queued",
	    m));
	pq = vm_page_pagequeue(m);
	vm_pagequeue_lock(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
	vm_pagequeue_unlock(pq);

/*
 * vm_page_dequeue_locked:
 *
 * Remove the given page from its current page queue.
 *
 * The page and page queue must be locked.
 */
vm_page_dequeue_locked(vm_page_t m)
	struct vm_pagequeue *pq;

	vm_page_lock_assert(m, MA_OWNED);
	pq = vm_page_pagequeue(m);
	vm_pagequeue_assert_locked(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);

/*
 * Add the given page to the specified page queue.
 *
 * The page must be locked.
 */
vm_page_enqueue(uint8_t queue, vm_page_t m)
	struct vm_pagequeue *pq;

	vm_page_lock_assert(m, MA_OWNED);
	KASSERT(queue < PQ_COUNT,
	    ("vm_page_enqueue: invalid queue %u request for page %p",
	    queue, m));
	pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_inc(pq);
	vm_pagequeue_unlock(pq);

/*
 * Move the given page to the tail of its current page queue.
 *
 * The page must be locked.
 */
vm_page_requeue(vm_page_t m)
	struct vm_pagequeue *pq;

	vm_page_lock_assert(m, MA_OWNED);
	KASSERT(m->queue != PQ_NONE,
	    ("vm_page_requeue: page %p is not queued", m));
	pq = vm_page_pagequeue(m);
	vm_pagequeue_lock(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_unlock(pq);

/*
 * vm_page_requeue_locked:
 *
 * Move the given page to the tail of its current page queue.
 *
 * The page queue must be locked.
 */
vm_page_requeue_locked(vm_page_t m)
	struct vm_pagequeue *pq;

	KASSERT(m->queue != PQ_NONE,
	    ("vm_page_requeue_locked: page %p is not queued", m));
	pq = vm_page_pagequeue(m);
	vm_pagequeue_assert_locked(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
/*
 * Put the specified page on the active list (if appropriate).
 * Ensure that act_count is at least ACT_INIT but do not otherwise
 * mess with it.
 *
 * The page must be locked.
 */
vm_page_activate(vm_page_t m)

	vm_page_lock_assert(m, MA_OWNED);
	if ((queue = m->queue) != PQ_ACTIVE) {
		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			if (queue != PQ_NONE)
			vm_page_enqueue(PQ_ACTIVE, m);
			KASSERT(queue == PQ_NONE,
			    ("vm_page_activate: wired page %p is queued", m));
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;

/*
 * vm_page_free_wakeup:
 *
 * Helper routine for vm_page_free_toq() and vm_page_cache().  This
 * routine is called when a page has been added to the cache or free
 * queues.
 *
 * The page queues must be locked.
 */
vm_page_free_wakeup(void)

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/*
	 * If the pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vm_cnt.v_cache_count + vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	/*
	 * Wakeup processes that are waiting on memory if we hit a
	 * high water mark.  And wakeup the scheduler process if we have
	 * lots of memory.  This process will swapin processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = false;
		wakeup(&vm_cnt.v_free_count);

/*
 * Turn a cached page into a free page, by changing its attributes.
 * Keep the statistics up-to-date.
 *
 * The free page queue must be locked.
 */
vm_page_cache_turn_free(vm_page_t m)

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_turn_free: page %p is not cached", m));
	m->flags &= ~PG_CACHED;
	vm_cnt.v_cache_count--;
	vm_phys_freecnt_adj(m, 1);
/*
 * Returns the given page to the free list,
 * disassociating it with any VM object.
 *
 * The object must be locked.  The page must be locked if it is managed.
 */
vm_page_free_toq(vm_page_t m)

	if ((m->oflags & VPO_UNMANAGED) == 0) {
		vm_page_lock_assert(m, MA_OWNED);
		KASSERT(!pmap_page_is_mapped(m),
		    ("vm_page_free_toq: freeing mapped page %p", m));
		KASSERT(m->queue == PQ_NONE,
		    ("vm_page_free_toq: unmanaged page %p is queued", m));
	PCPU_INC(cnt.v_tfree);

	if (vm_page_sbusied(m))
		panic("vm_page_free: freeing busy page %p", m);

	/*
	 * Unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */

	/*
	 * If fictitious remove object association and
	 * return, otherwise delay object association removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
	if (m->wire_count != 0)
		panic("vm_page_free: freeing wired page %p", m);
	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
		m->flags |= PG_UNHOLDFREE;

		/*
		 * Restore the default memory attribute to the page.
		 */
		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

		/*
		 * Insert the page into the physical memory allocator's
		 * cache/free page queues.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		vm_phys_freecnt_adj(m, 1);
#if VM_NRESERVLEVEL > 0
		if (!vm_reserv_free_page(m))
			vm_phys_free_pages(m, 0);
		if ((m->flags & PG_ZERO) != 0)
			++vm_page_zero_count;
			vm_page_zero_idle_wakeup();
		vm_page_free_wakeup();
		mtx_unlock(&vm_page_queue_free_mtx);
/*
 * Mark this page as wired down by yet
 * another map, removing it from paging queues
 * as necessary.
 *
 * If the page is fictitious, then its wire count must remain one.
 *
 * The page must be locked.
 */
vm_page_wire(vm_page_t m)

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	vm_page_lock_assert(m, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		KASSERT(m->wire_count == 1,
		    ("vm_page_wire: fictitious page %p's wire count isn't one",
		    m));
	if (m->wire_count == 0) {
		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
		    m->queue == PQ_NONE,
		    ("vm_page_wire: unmanaged page %p is queued", m));
		atomic_add_int(&vm_cnt.v_wire_count, 1);
	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));

/*
 * Release one wiring of the specified page, potentially allowing it to be
 * paged out.  Returns TRUE if the number of wirings transitions to zero and
 *
 * Only managed pages belonging to an object can be paged out.  If the number
 * of wirings transitions to zero and the page is eligible for page out, then
 * the page is added to the specified paging queue (unless PQ_NONE is
 * specified).
 *
 * If a page is fictitious, then its wire count must always be one.
 *
 * A managed page must be locked.
 */
vm_page_unwire(vm_page_t m, uint8_t queue)

	KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
	    ("vm_page_unwire: invalid queue %u request for page %p",
	    queue, m));
	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_assert_locked(m);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		KASSERT(m->wire_count == 1,
		    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
	if (m->wire_count > 0) {
		if (m->wire_count == 0) {
			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
			if ((m->oflags & VPO_UNMANAGED) == 0 &&
			    m->object != NULL && queue != PQ_NONE) {
				if (queue == PQ_INACTIVE)
					m->flags &= ~PG_WINATCFLS;
				vm_page_enqueue(queue, m);
		panic("vm_page_unwire: page %p's wire count is zero", m);
/*
 * Move the specified page to the inactive queue.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, reclaimed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * Normally noreuse is FALSE, resulting in LRU operation.  noreuse is set
 * to TRUE if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.  In
 * practice this is implemented by inserting the page at the head of the
 * queue, using a marker page to guide FIFO insertion ordering.
 *
 * The page must be locked.
 */
_vm_page_deactivate(vm_page_t m, boolean_t noreuse)
	struct vm_pagequeue *pq;

	vm_page_assert_locked(m);

	/*
	 * Ignore if the page is already inactive, unless it is unlikely to be
	 * reactivated.
	 */
	if ((queue = m->queue) == PQ_INACTIVE && !noreuse)
	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
		/* Avoid multiple acquisitions of the inactive queue lock. */
		if (queue == PQ_INACTIVE) {
			vm_pagequeue_lock(pq);
			vm_page_dequeue_locked(m);
			if (queue != PQ_NONE)
			m->flags &= ~PG_WINATCFLS;
			vm_pagequeue_lock(pq);
		m->queue = PQ_INACTIVE;
			TAILQ_INSERT_BEFORE(&vm_phys_domain(m)->vmd_inacthead,
			    m, plinks.q);
			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
		vm_pagequeue_cnt_inc(pq);
		vm_pagequeue_unlock(pq);
/*
 * Move the specified page to the inactive queue.
 *
 * The page must be locked.
 */
vm_page_deactivate(vm_page_t m)

	_vm_page_deactivate(m, FALSE);

/*
 * Move the specified page to the inactive queue with the expectation
 * that it is unlikely to be reused.
 *
 * The page must be locked.
 */
vm_page_deactivate_noreuse(vm_page_t m)

	_vm_page_deactivate(m, TRUE);

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
vm_page_try_to_cache(vm_page_t m)

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty || m->hold_count || m->wire_count ||
	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))

/*
 * vm_page_try_to_free()
 *
 * Attempt to free the page.  If we cannot free it, we do nothing.
 * 1 is returned on success, 0 on failure.
 */
vm_page_try_to_free(vm_page_t m)

	vm_page_lock_assert(m, MA_OWNED);
	if (m->object != NULL)
		VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty || m->hold_count || m->wire_count ||
	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
/*
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * The object and page must be locked.
 */
vm_page_cache(vm_page_t m)
	boolean_t cache_was_empty;

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (vm_page_busied(m) || (m->oflags & VPO_UNMANAGED) ||
	    m->hold_count || m->wire_count)
		panic("vm_page_cache: attempting to cache busy page");
	KASSERT(!pmap_page_is_mapped(m),
	    ("vm_page_cache: page %p is mapped", m));
	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
	    (object->type == OBJT_SWAP &&
	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
		/*
		 * Hypothesis: A cache-eligible page belonging to a
		 * default object or swap object but without a backing
		 * store must be zero filled.
		 */
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_cache: page %p is already cached", m));

	/*
	 * Remove the page from the paging queues.
	 */

	/*
	 * Remove the page from the object's collection of resident
	 * pages.
	 */
	vm_radix_remove(&object->rtree, m->pindex);
	TAILQ_REMOVE(&object->memq, m, listq);
	object->resident_page_count--;

	/*
	 * Restore the default memory attribute to the page.
	 */
	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

	/*
	 * Insert the page into the object's collection of cached pages
	 * and the physical memory allocator's cache/free page queues.
	 */
	m->flags &= ~PG_ZERO;
	mtx_lock(&vm_page_queue_free_mtx);
	cache_was_empty = vm_radix_is_empty(&object->cache);
	if (vm_radix_insert(&object->cache, m)) {
		mtx_unlock(&vm_page_queue_free_mtx);
		if (object->type == OBJT_VNODE &&
		    object->resident_page_count == 0)
			vdrop(object->handle);

	/*
	 * The above call to vm_radix_insert() could reclaim the one pre-
	 * existing cached page from this object, resulting in a call to
	 * vdrop().
	 */
	if (!cache_was_empty)
		cache_was_empty = vm_radix_is_singleton(&object->cache);

	m->flags |= PG_CACHED;
	vm_cnt.v_cache_count++;
	PCPU_INC(cnt.v_tcached);
#if VM_NRESERVLEVEL > 0
	if (!vm_reserv_free_page(m)) {
		vm_phys_free_pages(m, 0);
	vm_page_free_wakeup();
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Increment the vnode's hold count if this is the object's only
	 * cached page.  Decrement the vnode's hold count if this was
	 * the object's only resident page.
	 */
	if (object->type == OBJT_VNODE) {
		if (cache_was_empty && object->resident_page_count != 0)
			vhold(object->handle);
		else if (!cache_was_empty && object->resident_page_count == 0)
			vdrop(object->handle);
/*
 * Deactivate or do nothing, as appropriate.
 *
 * The object and page must be locked.
 */
vm_page_advise(vm_page_t m, int advice)

	vm_page_assert_locked(m);
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (advice == MADV_FREE)
		/*
		 * Mark the page clean.  This will allow the page to be freed
		 * up by the system.  However, such pages are often reused
		 * quickly by malloc() so we do not do anything that would
		 * cause a page fault if we can help it.
		 *
		 * Specifically, we do not try to actually free the page now
		 * nor do we try to put it in the cache (which would cause a
		 * page fault on reuse).
		 *
		 * But we do make the page as freeable as we can without
		 * actually taking the step of unmapping it.
		 */
	else if (advice != MADV_DONTNEED)

	/*
	 * Clear any references to the page.  Otherwise, the page daemon will
	 * immediately reactivate the page.
	 */
	vm_page_aflag_clear(m, PGA_REFERENCED);

	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))

	/*
	 * Place clean pages near the head of the inactive queue rather than
	 * the tail, thus defeating the queue's LRU operation and ensuring that
	 * the page will be reused quickly.  Dirty pages are given a chance to
	 * cycle once through the inactive queue before becoming eligible for
	 * laundering.
	 */
	_vm_page_deactivate(m, m->dirty == 0);
/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting if the page continues
 * to be in the object.  If the page doesn't exist, first allocate it
 * and then conditionally zero it.
 *
 * This routine may sleep.
 *
 * The object must be locked on entry.  The lock will, however, be released
 * and reacquired if the routine sleeps.
 */
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
		    vm_page_xbusied(m) : vm_page_busied(m);
			if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			/*
			 * Reference the page before unlocking and
			 * sleeping so that the page daemon is less
			 * likely to reclaim it.
			 */
			vm_page_aflag_set(m, PGA_REFERENCED);
			VM_OBJECT_WUNLOCK(object);
			vm_page_busy_sleep(m, "pgrbwt");
			VM_OBJECT_WLOCK(object);
		if ((allocflags & VM_ALLOC_WIRED) != 0) {
		    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
		if ((allocflags & VM_ALLOC_SBUSY) != 0)
	m = vm_page_alloc(object, pindex, allocflags);
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
		VM_OBJECT_WUNLOCK(object);
		VM_OBJECT_WLOCK(object);
	} else if (m->valid != 0)
	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
/*
 * Mapping function for valid or dirty bits in a page.
 *
 * Inputs are required to range within a page.
 */
vm_page_bits(int base, int size)

	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)

	if (size == 0)		/* handle degenerate case */

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
/*
 * vm_page_set_valid_range:
 *
 * Sets portions of a page valid.  The arguments are expected
 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 * of any partial chunks touched by the range.  The invalid portion of
 * such chunks will be zeroed.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
vm_page_set_valid_range(vm_page_t m, int base, int size)

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)		/* handle degenerate case */

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Assert that no previously invalid block that is now being validated
	 * is already dirty.
	 */
	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
	    ("vm_page_set_valid_range: page %p is dirty", m));

	/*
	 * Set valid bits inclusive of any overlap.
	 */
	m->valid |= vm_page_bits(base, size);
/*
 * Clear the given bits from the specified page's dirty field.
 */
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
#if PAGE_SIZE < 16384
	int shift;
#endif

	/*
	 * If the object is locked and the page is neither exclusive busy nor
	 * write mapped, then the page's dirty field cannot possibly be
	 * set by a concurrent pmap operation.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
		m->dirty &= ~pagebits;
		/*
		 * The pmap layer can call vm_page_dirty() without
		 * holding a distinguished lock.  The combination of
		 * the object's lock and an atomic operation suffice
		 * to guarantee consistency of the page dirty field.
		 *
		 * For PAGE_SIZE == 32768 case, compiler already
		 * properly aligns the dirty field, so no forcible
		 * alignment is needed.  Only require existence of
		 * atomic_clear_64 when page size is 32768.
		 */
		addr = (uintptr_t)&m->dirty;
#if PAGE_SIZE == 32768
		atomic_clear_64((uint64_t *)addr, pagebits);
#elif PAGE_SIZE == 16384
		atomic_clear_32((uint32_t *)addr, pagebits);
#else		/* PAGE_SIZE <= 8192 */
		/*
		 * Use a trick to perform a 32-bit atomic on the
		 * containing aligned word, to not depend on the existence
		 * of atomic_clear_{8, 16}.
		 */
		shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
		addr &= ~(sizeof(uint32_t) - 1);
		atomic_clear_32((uint32_t *)addr, pagebits << shift);
#endif		/* PAGE_SIZE */
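/*
 * Illustrative worked example (not part of the original source) for the
 * sub-word atomic case above, assuming PAGE_SIZE = 4096 (a one-byte dirty
 * field) on a big-endian machine, with the dirty field at an address whose
 * low two bits are 3: shift starts at 3 and is recomputed as
 * (4 - 1 - 3) * NBBY = 0, addr is rounded down to the containing 32-bit word,
 * and atomic_clear_32() clears "pagebits << 0" -- the low-order byte of that
 * word, which on a big-endian machine is exactly the byte holding the dirty
 * bits.
 */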
/*
 * vm_page_set_validclean:
 *
 * Sets portions of a page valid and clean.  The arguments are expected
 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 * of any partial chunks touched by the range.  The invalid portion of
 * such chunks will be zero'd.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
vm_page_set_validclean(vm_page_t m, int base, int size)
	vm_page_bits_t oldvalid, pagebits;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (size == 0)		/* handle degenerate case */

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	oldvalid = m->valid;
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
	if (base == 0 && size == PAGE_SIZE) {
		/*
		 * The page can only be modified within the pmap if it is
		 * mapped, and it can only be mapped if it was previously
		 * fully valid.
		 */
		if (oldvalid == VM_PAGE_BITS_ALL)
			/*
			 * Perform the pmap_clear_modify() first.  Otherwise,
			 * a concurrent pmap operation, such as
			 * pmap_protect(), could clear a modification in the
			 * pmap and set the dirty field on the page before
			 * pmap_clear_modify() had begun and after the dirty
			 * field was cleared here.
			 */
			pmap_clear_modify(m);
		m->oflags &= ~VPO_NOSYNC;
	} else if (oldvalid != VM_PAGE_BITS_ALL)
		m->dirty &= ~pagebits;
		vm_page_clear_dirty_mask(m, pagebits);
vm_page_clear_dirty(vm_page_t m, int base, int size)

	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));

/*
 * vm_page_set_invalid:
 *
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the affected areas are cleared.
 */
vm_page_set_invalid(vm_page_t m, int base, int size)
	vm_page_bits_t bits;

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
	    size >= object->un_pager.vnp.vnp_size)
		bits = VM_PAGE_BITS_ALL;
		bits = vm_page_bits(base, size);
	if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL &&
	KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
	    !pmap_page_is_mapped(m),
	    ("vm_page_set_invalid: page %p is mapped", m));
/*
 * vm_page_zero_invalid()
 *
 * The kernel assumes that the invalid portions of a page contain
 * garbage, but such pages can be mapped into memory by user code.
 * When this occurs, we must zero out the non-valid portions of the
 * page so user code sees what it expects.
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
 */
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)

	VM_OBJECT_ASSERT_WLOCKED(m->object);

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
	 * valid bit may be set ) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & ((vm_page_bits_t)1 << i))) {
				pmap_zero_page_area(m,
				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
		m->valid = VM_PAGE_BITS_ALL;
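/*
 * Illustrative worked example (not part of the original source), assuming
 * PAGE_SIZE = 4096 and DEV_BSIZE = 512: if, say, a 2300-byte file has been
 * read in and mapped, only blocks 0 through 4 of its single page are marked
 * valid (2300 bytes cover four full 512-byte blocks plus part of block 4),
 * so the scan above zeroes blocks 5 through 7, and with setvalid == TRUE the
 * whole page is then marked valid.
 */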
/*
 * Is (partial) page valid?  Note that the case where size == 0
 * will return FALSE in the degenerate case where the page is
 * entirely invalid, and TRUE otherwise.
 */
vm_page_is_valid(vm_page_t m, int base, int size)
	vm_page_bits_t bits;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	bits = vm_page_bits(base, size);
	return (m->valid != 0 && (m->valid & bits) == bits);

/*
 * vm_page_ps_is_valid:
 *
 * Returns TRUE if the entire (super)page is valid and FALSE otherwise.
 */
vm_page_ps_is_valid(vm_page_t m)

	VM_OBJECT_ASSERT_LOCKED(m->object);
	npages = atop(pagesizes[m->psind]);

	/*
	 * The physically contiguous pages that make up a superpage, i.e., a
	 * page with a page size index ("psind") greater than zero, will
	 * occupy adjacent entries in vm_page_array[].
	 */
	for (i = 0; i < npages; i++) {
		if (m[i].valid != VM_PAGE_BITS_ALL)

/*
 * Set the page's dirty bits if the page is modified.
 */
vm_page_test_dirty(vm_page_t m)

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
vm_page_lock_KBI(vm_page_t m, const char *file, int line)

	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);

vm_page_unlock_KBI(vm_page_t m, const char *file, int line)

	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);

vm_page_trylock_KBI(vm_page_t m, const char *file, int line)

	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)

	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);

vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)

	mtx_assert_(vm_page_lockptr(m), a, file, line);

vm_page_object_lock_assert(vm_page_t m)

	/*
	 * Certain of the page's fields may only be modified by the
	 * holder of the containing object's lock or the exclusive busy
	 * holder.  Unfortunately, the holder of the write busy is
	 * not recorded, and thus cannot be checked here.
	 */
	if (m->object != NULL && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_WLOCKED(m->object);

vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)

	if ((bits & PGA_WRITEABLE) == 0)

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is
	 * managed, is exclusively busied or the object is locked.
	 * Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("PGA_WRITEABLE on unmanaged page"));
	if (!vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);
#include "opt_ddb.h"
#include <sys/kernel.h>
#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)

	db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count);
	db_printf("vm_cnt.v_cache_count: %d\n", vm_cnt.v_cache_count);
	db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count);
	db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count);
	db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count);
	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)

	db_printf("pq_free %d pq_cache %d\n",
	    vm_cnt.v_free_count, vm_cnt.v_cache_count);
	for (dom = 0; dom < vm_ndomains; dom++) {
		    "dom %d page_cnt %d free %d pq_act %d pq_inact %d pass %d\n",
		    vm_dom[dom].vmd_page_count,
		    vm_dom[dom].vmd_free_count,
		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pass);

DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)

		db_printf("show pginfo addr\n");

	phys = strchr(modif, 'p') != NULL;
		m = PHYS_TO_VM_PAGE(addr);
		m = (vm_page_t)addr;
	    "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
	    " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
	    m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);