/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.34 2006/12/23 01:41:31 swildner Exp $
 */
/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(vm_object_t, vm_pindex_t);
static vm_page_t _vm_page_list_find2(int basequeue, int index);

struct vpgqueues vm_page_queues[PQ_COUNT];	/* Array of tailq lists */
#define ASSERT_IN_CRIT_SECTION()	KKASSERT(crit_test(curthread));
RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
	     vm_pindex_t, pindex);
static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;

	vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
	/* PQ_NONE has no queue */

	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
}
/*
 * note: place in initialized data section?  Is this necessary?
 */
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = 0;
/*
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}
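/*
 * The power-of-two test above is the standard bit trick: a power of two has
 * exactly one bit set, so "x & (x - 1)" clears that bit and yields zero only
 * for powers of two.  For example 0x1000 & 0x0FFF == 0, while a bad size such
 * as 0x1400 gives 0x1400 & 0x13FF == 0x1000, which is non-zero and panics.
 */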
/*
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Must be called in a critical section.
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
	struct vpgqueues *vpq;
	vm_page_t m;

	++vmstats.v_page_count;
	++vmstats.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;
	KKASSERT(m->dirty == 0);

	vpq = &vm_page_queues[m->queue];
	if (vpq->flipflop)
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	else
		TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	vpq->flipflop = 1 - vpq->flipflop;

	vm_page_queues[m->queue].lcnt++;
	return (m);
}
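/*
 * The m->pc computed above is the page's "color": the physical page number
 * reduced modulo the queue set size via PQ_L2_MASK.  Each color has its own
 * PQ_FREE / PQ_CACHE sub-queue (see vm_page_queue_init()), and the selectors
 * vm_page_select_free() / vm_page_select_cache() below pick a sub-queue from
 * (pindex + object->pg_color) & PQ_L2_MASK, which is what implements the
 * cache coloring described before vm_page_list_find().
 */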
/*
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and for the object/offset-to-page
 * hash table headers.  Each page cell is initialized and placed on the
 * free list.
 *
 * starta/enda represents the range of physical memory addresses available
 * for use (skipping memory already used by the kernel), subject to
 * phys_avail[].  Note that phys_avail[] has already mapped out memory
 * already in use by the kernel.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;
	vm_paddr_t total;
	int i;
	int nblocks;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];
	end = trunc_page(end);

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_page_queue_init();

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
			  VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}
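/*
 * Bookkeeping note for the arithmetic above: the vm_page array is carved out
 * of the end of the biggest phys_avail[] chunk ([new_end, end)), so the
 * usable page count works out to roughly
 *
 *	npages ~= (total - page_range * sizeof(struct vm_page)) / PAGE_SIZE
 *
 * i.e. every managed physical page costs PAGE_SIZE bytes of usable memory
 * plus one struct vm_page of metadata.
 */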
/*
 * Scan comparison function for Red-Black tree scans.  An inclusive
 * (start,end) is expected.  Other fields are not used.
 */
int
rb_vm_page_scancmp(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (p->pindex < info->start_pindex)
		return(-1);
	if (p->pindex > info->end_pindex)
		return(1);
	return(0);
}

int
rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
{
	if (p1->pindex < p2->pindex)
		return(-1);
	if (p1->pindex > p2->pindex)
		return(1);
	return(0);
}
/*
 * The opposite of vm_page_hold().  A page can be freed while being held,
 * which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
 * in this case to actually free it once the hold count drops to 0.
 *
 * This routine must be called at splvm().
 */
void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD) {
		vm_page_busy(mem);
		vm_page_free_toq(mem);
	}
}
/*
 * Inserts the given mem entry into the object and object list.
 *
 * The pagetables are not updated but will presumably fault the page
 * in if necessary, or if a kernel page the caller will at some point
 * enter the page into the kernel's pmap.  We are not allowed to block
 * here so we *can't* do this anyway.
 *
 * This routine may not block.
 * This routine must be called with a critical section held.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	ASSERT_IN_CRIT_SECTION();
	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object.
	 */
	vm_page_rb_tree_RB_INSERT(&object->rb_memq, m);
	object->generation++;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}
/*
 * Removes the given vm_page_t from the global (object,index) hash table
 * and from the object's memq.
 *
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 *
 * The page must be BUSY and will remain BUSY on return.  No spl needs to be
 * held on call to this routine.
 *
 * note: FreeBSD side effect was to unbusy the page on return.  We leave
 *       the page busy.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	crit_enter();
	if (m->object == NULL) {
		crit_exit();
		return;
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_page_remove: page not busy");

	object = m->object;

	/*
	 * Remove the page from the object and update the object.
	 */
	vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
	object->resident_page_count--;
	object->generation++;
	m->object = NULL;

	crit_exit();
}
/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * This routine will operate properly without spl protection, but
 * the returned page could be in flux if it is busy.  Because an
 * interrupt can race a caller's busy check (unbusying and freeing the
 * page we return before the caller is able to check the busy bit),
 * the caller should generally call this routine with a critical
 * section held.
 *
 * Callers may call this routine without spl protection if they know
 * 'for sure' that the page will not be ripped out from under them.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Search the hash table for this object/offset pair
	 */
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
	return(m);
}
/*
 * Move the given memory entry from its current object to the specified
 * target object/offset.
 *
 * The object must be locked.
 * This routine may not block.
 *
 * Note: This routine will raise itself to splvm(), the caller need not.
 *
 * Note: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons: (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * Note: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *       swap.  If the page is on the cache, we have to deactivate it
 *       or vm_page_dirty() will panic.  Dirty pages are not allowed
 *       on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	crit_enter();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	crit_exit();
}
/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is being moved between queues or otherwise is to remain BUSYied by the
 * caller.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}
/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}
/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * This routine must be called at splvm().
 * This routine may not block.
 *
 * Note that this routine is carefully inlined.  A non-inlined version
 * is available for outside callers but the only critical path is
 * from within this source file.
 */
static __inline vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

	if (prefer_zero)
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	else
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	if (m == NULL)
		m = _vm_page_list_find2(basequeue, index);
	return(m);
}
static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;
		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}
vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	return(_vm_page_list_find(basequeue, index, prefer_zero));
}
/*
 * Find a page on the cache queue with color optimization.  As pages
 * might be found, but not applicable, they are deactivated.  This
 * keeps us from using potentially busy cached pages.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
static vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = _vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			  m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
}
/*
 * Find a free or zero page, with specified preference.  We attempt to
 * inline the nominal case and fall back to _vm_page_select_free()
 * otherwise.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = _vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return(m);
}
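/*
 * Worked example of the color selection above (assuming, purely for the sake
 * of illustration, PQ_L2_SIZE == 256 and therefore PQ_L2_MASK == 255): an
 * object with pg_color 17 allocating pindex 300 searches free sub-queue
 * (300 + 17) & 255 == 61 first, and _vm_page_list_find2() fans out to
 * neighbouring colors only if that sub-queue is empty.
 */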
/*
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.
 *
 *	page_req classes:
 *
 *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
 *	VM_ALLOC_SYSTEM		greater free drain
 *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
 *	VM_ALLOC_ZERO		advisory request for pre-zero'd page
 *
 * The object must be locked.
 * This routine may not block.
 * The returned page will be marked PG_BUSY
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;

	KASSERT(!vm_page_lookup(object, pindex),
		("vm_page_alloc: page already allocated"));
	KKASSERT(page_req &
		 (VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if (curthread == pagethread)
		page_req |= VM_ALLOC_SYSTEM;

	crit_enter();
loop:
	if (vmstats.v_free_count > vmstats.v_free_reserved ||
	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
		vmstats.v_free_count > vmstats.v_interrupt_free_min)
	) {
		/*
		 * The free queue has sufficient free pages to take one out.
		 */
		if (page_req & VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req & VM_ALLOC_NORMAL) {
		/*
		 * Allocatable from the cache (non-interrupt only).  On
		 * success, we must free the page and try again, thus
		 * ensuring that vmstats.v_*_free_min counters are replenished.
		 */
#ifdef INVARIANTS
		if (curthread->td_preempted) {
			kprintf("vm_page_alloc(): warning, attempt to allocate"
				" cache page from preempting interrupt\n");
			m = NULL;
		} else {
			m = vm_page_select_cache(object, pindex);
		}
#else
		m = vm_page_select_cache(object, pindex);
#endif
		/*
		 * On success move the page into the free queue and loop.
		 */
		if (m != NULL) {
			KASSERT(m->dirty == 0,
				("Found dirty cache page %p", m));
			vm_page_busy(m);
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			goto loop;
		}

		/*
		 * On failure return NULL
		 */
		crit_exit();
#if defined(DIAGNOSTIC)
		if (vmstats.v_cache_count > 0)
			kprintf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	} else {
		/*
		 * No pages available, wakeup the pageout daemon and give up.
		 */
		crit_exit();
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * Good page found.  The page has not yet been busied.  We are in
	 * a critical section.
	 */
	KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue\n"));
	KASSERT(m->dirty == 0,
		("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * Remove from free queue
	 */
	vm_page_unqueue_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.  Set
	 * the page PG_BUSY
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}

	/*
	 * vm_page_insert() is safe prior to the crit_exit().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	crit_exit();

	/*
	 * A PG_BUSY page is returned.
	 */
	return (m);
}
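/*
 * Typical call pattern for the allocator above (an illustrative sketch only,
 * not taken verbatim from a caller in this file): callers that are allowed
 * to sleep usually loop with vm_wait() when NULL is returned, e.g.
 *
 *	do {
 *		m = vm_page_alloc(object, pindex,
 *				  VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *		if (m == NULL)
 *			vm_wait();
 *	} while (m == NULL);
 *
 * VM_ALLOC_ZERO is only advisory; the caller must still test PG_ZERO before
 * assuming the returned page actually contains zeros.
 */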
/*
 * Block until free pages are available for allocation, called in various
 * places before memory allocations.
 */
void
vm_wait(void)
{
	crit_enter();
	if (curthread == pagethread) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, 0, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&vmstats.v_free_count, 0, "vmwait", 0);
	}
	crit_exit();
}
/*
 * Block until free pages are available for allocation
 *
 * Called only in vm_fault so that processes page faulting can be
 * easily tracked.
 *
 * Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 * processes will be able to grab memory first.  Do not change
 * this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	crit_enter();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&vmstats.v_free_count, 0, "pfault", 0);
	crit_exit();
}
/*
 * Put the specified page on the active list (if appropriate).  Ensure
 * that act_count is at least ACT_INIT but do not otherwise mess with it.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	crit_enter();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl,
					  m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vmstats.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
	crit_exit();
}
/*
 * Helper routine for vm_page_free_toq() and vm_page_cache().  This
 * routine is called when a page has been added to the cache or free
 * queues.
 *
 * This routine may not block.
 * This routine must be called at splvm()
 */
static void
vm_page_free_wakeup(void)
{
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vmstats.v_cache_count + vmstats.v_free_count >=
	    vmstats.v_pageout_free_min
	) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark. And wakeup scheduler process if we have
	 * lots of memory. this process will swapin processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&vmstats.v_free_count);
	}
}
/*
 * Returns the given page to the PQ_FREE list, disassociating it with
 * any VM object.
 *
 * The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
 * return (the page will have been freed).  No particular spl is required
 * on entry.
 *
 * This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
	struct vpgqueues *pq;

	crit_enter();
	mycpu->gd_cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		kprintf(
		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
			(u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
			m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * No further management of fictitious pages occurs beyond object
	 * and queue removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		vm_page_wakeup(m);
		crit_exit();
		return;
	}

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic(
			"vm_page_free: invalid wire count (%d), pindex: 0x%lx",
				m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page");
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
		m->flags &= ~PG_UNMANAGED;
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else {
		m->queue = PQ_FREE + m->pc;
	}
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages on the end ( where we look for zero'd pages
	 * first ) and non-zerod pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}
	vm_page_wakeup(m);
	vm_page_free_wakeup();
	crit_exit();
}
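/*
 * The tail/head split above is the other half of the PG_ZERO optimization:
 * _vm_page_list_find() uses TAILQ_LAST() on the free sub-queue when a
 * zero'd page is preferred, so keeping zero'd pages at the tail lets
 * VM_ALLOC_ZERO allocations find them without scanning, while ordinary
 * allocations take non-zero'd pages from the head first.
 */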
/*
 * vm_page_unmanage()
 *
 * Prevent PV management from being done on the page.  The page is
 * removed from the paging queues as if it were wired, and as a
 * consequence of no longer being managed the pageout daemon will not
 * touch it (since there is no way to locate the pte mappings for the
 * page).  madvise() calls that mess with the pmap will also no longer
 * operate on the page.
 *
 * Beyond that the page is still reasonably 'normal'.  Freeing the page
 * will clear the flag.
 *
 * This routine is used by OBJT_PHYS objects - objects using unswappable
 * physical memory as backing store rather than swap-backed memory and
 * will eventually be extended to support 4MB unmanaged physical
 * mappings.
 *
 * Must be called with a critical section held.
 */
void
vm_page_unmanage(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
}
/*
 * Mark this page as wired down by yet another map, removing it from
 * paging queues as necessary.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).  Don't do anything with fictitious
	 * pages because they are always wired.
	 */
	crit_enter();
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if (m->wire_count == 0) {
			if ((m->flags & PG_UNMANAGED) == 0)
				vm_page_unqueue(m);
			vmstats.v_wire_count++;
		}
		m->wire_count++;
		KASSERT(m->wire_count != 0,
			("vm_page_wire: wire_count overflow m=%p", m));
	}
	vm_page_flag_set(m, PG_MAPPED);
	crit_exit();
}
/*
 * Release one wiring of this page, potentially enabling it to be paged again.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, freed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * BUT, if we are in a low-memory situation we have no choice but to
 * put clean pages on the cache queue.
 *
 * A number of routines use vm_page_unwire() to guarantee that the page
 * will go into either the inactive or active queues, and will NEVER
 * be placed in the cache - for example, just after dirtying a page.
 * dirty pages in the cache are not allowed.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	crit_enter();
	if (m->flags & PG_FICTITIOUS) {
		/* do nothing */
	} else if (m->wire_count <= 0) {
		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
	} else {
		if (--m->wire_count == 0) {
			--vmstats.v_wire_count;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				vmstats.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				vmstats.v_inactive_count++;
			}
		}
	}
	crit_exit();
}
/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		vmstats.v_inactive_count++;
	}
}

void
vm_page_deactivate(vm_page_t m)
{
	crit_enter();
	_vm_page_deactivate(m, 0);
	crit_exit();
}
/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_cache(m);
	crit_exit();
	return(1);
}
/*
 * Attempt to free the page.  If we cannot free it, we do nothing.
 * 1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	crit_exit();
	return(1);
}
/*
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
	    m->wire_count || m->hold_count) {
		kprintf("vm_page_cache: attempting to cache busy/held page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */
	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
			(long)m->pindex);
	}
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	vm_page_queues[m->queue].lcnt++;
	TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
	vmstats.v_cache_count++;
	vm_page_free_wakeup();
}
/*
 * vm_page_dontneed()
 *
 * Cache, deactivate, or do nothing as appropriate.  This routine
 * is typically used by madvise() MADV_DONTNEED.
 *
 * Generally speaking we want to move the page into the cache so
 * it gets reused quickly.  However, this can result in a silly syndrome
 * due to the page recycling too quickly.  Small objects will not be
 * fully cached.  On the other hand, if we move the page to the inactive
 * queue we wind up with a problem whereby very large objects
 * unnecessarily blow away our inactive and cache queues.
 *
 * The solution is to move the pages based on a fixed weighting.  We
 * either leave them alone, deactivate them, or move them to the cache,
 * where moving them to the cache has the highest weighting.
 * By forcing some pages into other queues we eventually force the
 * system to balance the queues, potentially recovering other unrelated
 * space from active.  The idea is to not force this to happen too
 * often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * occasionally leave the page alone
	 */
	crit_enter();
	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		crit_exit();
		return;
	}

	vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
	crit_exit();
}
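/*
 * Arithmetic behind the weights above: dnweight increments on every call, so
 * (dnw & 0x01F0) == 0 holds for 16 of every 512 calls (1 in 32), which is the
 * "leave the page alone" case.  (dnw & 0x0070) == 0 holds for 16 of every 128
 * calls (4 in 32); one of those four was already absorbed by the first test,
 * leaving roughly 3 in 32 deactivations for clean pages, while the remaining
 * 28 in 32 take the head-of-inactive ("cache it soon") path - matching the
 * 3/32 and 28/32 figures quoted in the comments.
 */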
/*
 * Grab a page, blocking if it is busy and allocating a page if necessary.
 * A busy page is returned or NULL.
 *
 * If VM_ALLOC_RETRY is specified VM_ALLOC_NORMAL must also be specified.
 * If VM_ALLOC_RETRY is not specified vm_page_grab() may return NULL.
 *
 * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
 * always returned if we had blocked.
 * This routine will never return NULL if VM_ALLOC_RETRY is set.
 * This routine may not be called from an interrupt.
 * The returned page may not be entirely valid.
 *
 * This routine may be called from mainline code without spl protection and
 * be guaranteed a busied page associated with the object at the specified
 * index.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int generation;

	KKASSERT(allocflags &
		 (VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
	crit_enter();
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			while ((object->generation == generation) &&
					(m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, 0, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					m = NULL;
					goto done;
				}
			}
			goto retrylookup;
		} else {
			vm_page_busy(m);
			goto done;
		}
	}
	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		vm_wait();
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			goto done;
		goto retrylookup;
	}
done:
	crit_exit();
	return(m);
}
/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
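/*
 * Example of the bit mapping above, assuming the usual DEV_BSIZE of 512
 * (DEV_BSHIFT == 9): for base = 512 and size = 1024 the range covers
 * device blocks 1 and 2, so first_bit = 1, last_bit = (512 + 1024 - 1) >> 9
 * = 2, and the result is (2 << 2) - (1 << 1) = 8 - 2 = 6, i.e. bits 1 and 2
 * set in the m->valid / m->dirty bitmaps.
 */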
/*
 * Sets portions of a page valid and clean.  The arguments are expected
 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 * of any partial chunks touched by the range.  The invalid portion of
 * such chunks will be zero'd.
 *
 * This routine may not block.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be cleared.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;

	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));

	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}
/*
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the affected areas are cleared.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}
/*
 * The kernel assumes that the invalid portions of a page contain
 * garbage, but such pages can be mapped into memory by user code.
 * When this occurs, we must zero out the non-valid portions of the
 * page so user code sees what it expects.
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zerod.  Invalid sub-DEV_BSIZE'd areas ( where the
	 * valid bit may be set ) have already been zerod by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}
/*
 * Is a (partial) page valid?  Note that the case where size == 0
 * will return FALSE in the degenerate case where the page is entirely
 * invalid, and TRUE otherwise.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}
/*
 * update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}
#include "opt_ddb.h"
#include <sys/kernel.h>
#include <ddb/ddb.h>
DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
}
DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
		vm_page_queues[PQ_ACTIVE].lcnt,
		vm_page_queues[PQ_INACTIVE].lcnt);
}