/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.32 2005/07/27 07:55:15 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(vm_object_t, vm_pindex_t);
static vm_page_t _vm_page_list_find2(int basequeue, int index);

static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static struct vm_page **vm_page_buckets; /* Array of buckets */
static volatile int vm_page_bucket_generation;

struct vpgqueues vm_page_queues[PQ_COUNT]; /* Array of tailq lists */
#define ASSERT_IN_CRIT_SECTION()	KKASSERT(crit_test(curthread))
static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;

	vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
	/* PQ_NONE has no queue */

	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
}
/*
 * note: place in initialized data section?  Is this necessary?
 */
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = 0;
/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory size.
 *	Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}
/*
 * vm_add_new_page:
 *
 *	Add a new page to the freelist for use by the system.  New pages
 *	are added to both the head and tail of the associated free page
 *	queue in a bottom-up fashion, so both zero'd and non-zero'd page
 *	requests pull 'recent' adds (higher physical addresses) first.
 *
 *	Must be called in a critical section.
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
	struct vpgqueues *vpq;
	vm_page_t m;

	++vmstats.v_page_count;
	++vmstats.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;
	KKASSERT(m->dirty == 0);

	vpq = &vm_page_queues[m->queue];
	if (vpq->flipflop)
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	else
		TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	vpq->flipflop = 1 - vpq->flipflop;

	vm_page_queues[m->queue].lcnt++;
	return (m);
}
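/*
 * Example (illustrative; the exact constants depend on the PQ_* kernel
 * options): with 4K pages and PQ_L2_SIZE = 256 (PQ_L2_MASK = 255), the
 * color computed above is simply bits 12-19 of the physical address, so
 * physically consecutive pages land in consecutive PQ_FREE[pc] queues
 * and pages that would collide in a physically-indexed cache share a
 * color.
 */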
/*
 * vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and for the object/offset-to-page
 *	hash table headers.  Each page cell is initialized and placed on the
 *	free list.
 *
 *	starta/enda represents the range of physical memory addresses available
 *	for use (skipping memory already used by the kernel), subject to
 *	phys_avail[].  Note that phys_avail[] has already mapped out memory
 *	already in use by the kernel.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	struct vm_page **bucket;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;
	vm_paddr_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];
	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the chain length.  This is about the same size using the
	 * singly-linked list as the 1x hash table we were using before
	 * using TAILQ but the chain length will be smaller.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;	/* approximately 2x the pages */
	vm_page_hash_mask = vm_page_bucket_count - 1;
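	/*
	 * Example (illustrative numbers only): on a machine with 512MB of
	 * RAM and 4K pages, atop(total) is 131072; the while loop leaves
	 * vm_page_bucket_count at 131072 (already a power of 2) and the
	 * final shift doubles it to 262144 buckets, giving a hash mask of
	 * 0x3ffff.
	 */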
	/*
	 * Cut a chunk out of the largest block of physical memory,
	 * moving its end point down to accommodate the hash table and
	 * vm_page array.
	 */
	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
	new_end = trunc_page(new_end);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (end - new_end)) / PAGE_SIZE;

	end = new_end;
	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;
	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}
/*
 * vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 *	This routine may not block.
 *
 *	We try to randomize the hash based on the object to spread the pages
 *	out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(vm_object_t object, vm_pindex_t pindex)
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return(i & vm_page_hash_mask);
}
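/*
 * Example: two objects mapping the same pindex range hash to different
 * bucket chains because each object's hash_rand (assigned when the
 * object is created) perturbs the sum; masking with vm_page_hash_mask
 * works as a cheap modulo only because the bucket count is a power
 * of 2.
 */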
/*
 * vm_page_unhold:
 *
 *	The opposite of vm_page_hold().  A page can be freed while being held,
 *	which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
 *	in this case to actually free it once the hold count drops to 0.
 *
 *	This routine must be called at splvm().
 */
void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD) {
		vm_page_busy(mem);
		vm_page_free_toq(mem);
	}
}
/*
 * vm_page_insert:
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	This routine may not block.
 *	This routine must be called with a critical section held.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	struct vm_page **bucket;

	ASSERT_IN_CRIT_SECTION();
	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */
	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	object->generation++;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}
/*
 * vm_page_remove:
 *
 *	Removes the given vm_page_t from the global (object,index) hash table
 *	and from the object's memq.
 *
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 *
 *	The page must be BUSY and will remain BUSY on return.  No spl needs to be
 *	held on call to this routine.
 *
 *	note: FreeBSD side effect was to unbusy the page on return.  We leave
 *	the page busy.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	struct vm_page **bucket;

	crit_enter();
	if (m->object == NULL) {
		crit_exit();
		return;
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_page_remove: page not busy");

	object = m->object;

	/*
	 * Remove from the object_object/offset hash table.  The object
	 * must be on the hash queue, we will panic if it isn't
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */
	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
	while (*bucket != m) {
		if (*bucket == NULL)
			panic("vm_page_remove(): page not found in hash");
		bucket = &(*bucket)->hnext;
	}
	*bucket = m->hnext;
	m->hnext = NULL;
	vm_page_bucket_generation++;

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	object->generation++;
	m->object = NULL;

	crit_exit();
}
/*
 * vm_page_lookup:
 *
 *	Locate and return the page at (object, pindex), or NULL if the
 *	page could not be found.
 *
 *	This routine will operate properly without spl protection, but
 *	the returned page could be in flux if it is busy.  Because an
 *	interrupt can race a caller's busy check (unbusying and freeing the
 *	page we return before the caller is able to check the busy bit),
 *	the caller should generally call this routine with a critical
 *	section held.
 *
 *	Callers may call this routine without spl protection if they know
 *	'for sure' that the page will not be ripped out from under them.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */
retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}
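/*
 * The generation check above is a lockless-read idiom: the lookup runs
 * without blocking, and if an insert or remove bumps
 * vm_page_bucket_generation while we are walking the chain, we simply
 * restart from the bucket head rather than risk having followed a
 * stale hnext pointer.
 */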
/*
 * vm_page_rename:
 *
 *	Move the given memory entry from its current object to the specified
 *	target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: This routine will raise itself to splvm(), the caller need not.
 *
 *	Note: Swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: We *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	crit_enter();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	crit_exit();
}
/*
 * vm_page_unqueue_nowakeup:
 *
 *	vm_page_unqueue() without any wakeup.  This routine is used when a page
 *	is being moved between queues or otherwise is to remain BUSYied by the
 *	caller.
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}
/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}
/*
 * vm_page_list_find()
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page that does
 *	not overload other nearby pages in the object in the cpu's L1 or L2
 *	caches.  We need this optimization because cpu caches tend to be
 *	physical caches, while object spaces tend to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	Note that this routine is carefully inlined.  A non-inlined version
 *	is available for outside callers but the only critical path is
 *	from within this source file.
 */
static __inline vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

	if (prefer_zero)
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	else
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	if (m == NULL)
		m = _vm_page_list_find2(basequeue, index);
	return(m);
}
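/*
 * Example: a request for color 5 first checks the PQ_FREE+5 queue
 * directly (zero'd pages are kept at the tail, so prefer_zero scans
 * from the tail).  Only if that queue is empty does
 * _vm_page_list_find2() below scan the other colors, pairing index+i
 * with index-i (mod PQ_L2_SIZE) from the farthest color inward, and
 * take the first non-empty queue it finds.
 */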
static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;
		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}
vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	return(_vm_page_list_find(basequeue, index, prefer_zero));
}
/*
 * vm_page_select_cache:
 *
 *	Find a page on the cache queue with color optimization.  As pages
 *	might be found, but not applicable, they are deactivated.  This
 *	keeps us from using potentially busy cached pages.
 *
 *	This routine must be called with a critical section held.
 *	This routine may not block.
 */
static vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = _vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			       m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return(m);
	}
}
/*
 * vm_page_select_free:
 *
 *	Find a free or zero page, with specified preference.  We attempt to
 *	inline the nominal case and fall back to _vm_page_list_find2()
 *	otherwise.
 *
 *	This routine must be called with a critical section held.
 *	This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = _vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return(m);
}
/*
 * vm_page_alloc()
 *
 *	Allocate and return a memory cell associated with this VM object/offset
 *	pair.
 *
 *	page_req classes:
 *
 *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
 *	VM_ALLOC_SYSTEM		greater free drain
 *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
 *	VM_ALLOC_ZERO		advisory request for pre-zero'd page
 *
 *	The object must be locked.
 *	This routine may not block.
 *	The returned page will be marked PG_BUSY.
 *
 *	Additional special handling is required when called from an interrupt
 *	(VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 *	in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;

	KASSERT(!vm_page_lookup(object, pindex),
		("vm_page_alloc: page already allocated"));
	KKASSERT(page_req &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if (curthread == pagethread)
		page_req |= VM_ALLOC_SYSTEM;

	crit_enter();
loop:
	if (vmstats.v_free_count > vmstats.v_free_reserved ||
	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
		vmstats.v_free_count > vmstats.v_interrupt_free_min)
	) {
		/*
		 * The free queue has sufficient free pages to take one out.
		 */
		if (page_req & VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req & VM_ALLOC_NORMAL) {
		/*
		 * Allocatable from the cache (non-interrupt only).  On
		 * success, we must free the page and try again, thus
		 * ensuring that vmstats.v_*_free_min counters are replenished.
		 */
#ifdef INVARIANTS
		if (curthread->td_preempted) {
			printf("vm_page_alloc(): warning, attempt to allocate"
				" cache page from preempting interrupt\n");
			m = NULL;
		} else {
			m = vm_page_select_cache(object, pindex);
		}
#else
		m = vm_page_select_cache(object, pindex);
#endif
		/*
		 * On success move the page into the free queue and loop.
		 */
		if (m != NULL) {
			KASSERT(m->dirty == 0,
				("Found dirty cache page %p", m));
			vm_page_busy(m);
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			goto loop;
		}

		/*
		 * On failure return NULL
		 */
		crit_exit();
#if defined(DIAGNOSTIC)
		if (vmstats.v_cache_count > 0)
			printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	} else {
		/*
		 * No pages available, wakeup the pageout daemon and give up.
		 */
		crit_exit();
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * Good page found.  The page has not yet been busied.  We are in
	 * a critical section.
	 */
	KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue\n"));
	KASSERT(m->dirty == 0,
		("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * Remove from free queue
	 */
	vm_page_unqueue_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.  Set
	 * the page PG_BUSY.
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;

	/*
	 * vm_page_insert() is safe prior to the crit_exit().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anyway.
	 */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	crit_exit();

	/*
	 * A PG_BUSY page is returned.
	 */
	return (m);
}
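/*
 * Typical usage sketch (illustrative only): callers that can block
 * allocate with VM_ALLOC_NORMAL and retry after vm_wait(), e.g.
 *
 *	do {
 *		m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *		if (m == NULL)
 *			vm_wait();
 *	} while (m == NULL);
 *
 * vm_page_grab() below packages this loop (plus busy-page handling)
 * for mainline code.
 */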
/*
 * vm_wait:
 *
 *	Block until free pages are available for allocation, called in various
 *	places before memory allocations.
 */
void
vm_wait(void)
{
	crit_enter();
	if (curthread == pagethread) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, 0, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&vmstats.v_free_count, 0, "vmwait", 0);
	}
	crit_exit();
}
/*
 * vm_waitpfault:
 *
 *	Block until free pages are available for allocation.
 *
 *	Called only in vm_fault so that processes page faulting can be
 *	blocked, mediating the free supply.
 *
 *	Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	processes will be able to grab memory first.  Do not change
 *	this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	crit_enter();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&vmstats.v_free_count, 0, "pfault", 0);
	crit_exit();
}
/*
 * vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).  Ensure
 *	that act_count is at least ACT_INIT but do not otherwise mess with it.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	crit_enter();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl,
					    m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vmstats.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
	crit_exit();
}
/*
 * vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static void
vm_page_free_wakeup(void)
{
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vmstats.v_cache_count + vmstats.v_free_count >=
	    vmstats.v_pageout_free_min
	) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark.  And wakeup scheduler process if we have
	 * lots of memory.  this process will swapin processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&vmstats.v_free_count);
	}
}
/*
 * vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE list, disassociating it with
 *	any VM object.
 *
 *	The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
 *	return (the page will have been freed).  No particular spl is required
 *	on entry.
 *
 *	This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
	struct vpgqueues *pq;

	crit_enter();
	mycpu->gd_cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		printf(
		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * No further management of fictitious pages occurs beyond object
	 * and queue removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		vm_page_wakeup(m);
		crit_exit();
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic(
			    "vm_page_free: invalid wire count (%d), pindex: 0x%lx",
			    m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page");
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
		m->flags &= ~PG_UNMANAGED;
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else {
		m->queue = PQ_FREE + m->pc;
	}
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages on the end ( where we look for zero'd pages
	 * first ) and non-zerod pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}
	vm_page_wakeup(m);
	vm_page_free_wakeup();
	crit_exit();
}
/*
 * vm_page_unmanage()
 *
 * Prevent PV management from being done on the page.  The page is
 * removed from the paging queues as if it were wired, and as a
 * consequence of no longer being managed the pageout daemon will not
 * touch it (since there is no way to locate the pte mappings for the
 * page).  madvise() calls that mess with the pmap will also no longer
 * operate on the page.
 *
 * Beyond that the page is still reasonably 'normal'.  Freeing the page
 * will clear the flag.
 *
 * This routine is used by OBJT_PHYS objects - objects using unswappable
 * physical memory as backing store rather than swap-backed memory and
 * will eventually be extended to support 4MB unmanaged physical
 * mappings.
 *
 * Must be called with a critical section held.
 */
void
vm_page_unmanage(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
}
/*
 * vm_page_wire:
 *
 *	Mark this page as wired down by yet another map, removing it from
 *	paging queues as necessary.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).  Don't do anything with fictitious
	 * pages because they are always wired.
	 */
	crit_enter();
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if (m->wire_count == 0) {
			if ((m->flags & PG_UNMANAGED) == 0)
				vm_page_unqueue(m);
			vmstats.v_wire_count++;
		}
		m->wire_count++;
		KASSERT(m->wire_count != 0,
		    ("vm_page_wire: wire_count overflow m=%p", m));
	}
	vm_page_flag_set(m, PG_MAPPED);
	crit_exit();
}
/*
 * vm_page_unwire:
 *
 *	Release one wiring of this page, potentially enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	BUT, if we are in a low-memory situation we have no choice but to
 *	put clean pages on the cache queue.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	dirty pages in the cache are not allowed.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	crit_enter();
	if (m->flags & PG_FICTITIOUS) {
		/* do nothing */
	} else if (m->wire_count <= 0) {
		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
	} else {
		if (--m->wire_count == 0) {
			--vmstats.v_wire_count;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				vmstats.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				vmstats.v_inactive_count++;
			}
		}
	}
	crit_exit();
}
/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		vmstats.v_inactive_count++;
	}
}

void
vm_page_deactivate(vm_page_t m)
{
	crit_enter();
	_vm_page_deactivate(m, 0);
	crit_exit();
}
/*
 * vm_page_try_to_cache:
 *
 *	Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_cache(m);
	crit_exit();
	return(1);
}
/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	crit_exit();
	return(1);
}
/*
 * vm_page_cache:
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 *
 *	This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
	    m->wire_count || m->hold_count) {
		printf("vm_page_cache: attempting to cache busy/held page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */
	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
		    (long)m->pindex);
	}
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	vm_page_queues[m->queue].lcnt++;
	TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
	vmstats.v_cache_count++;
	vm_page_free_wakeup();
}
/*
 * vm_page_dontneed()
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * occasionally leave the page alone
	 */
	crit_enter();
	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		crit_exit();
		return;
	}

	vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
	crit_exit();
}
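/*
 * The weights fall out of the mask arithmetic above: (dnw & 0x01F0)
 * covers five bits, so the page is left alone 1 time in 32; of the
 * remainder, (dnw & 0x0070) == 0 selects the 3 of 31 cases where bits
 * 4-6 are clear but bit 7 or 8 is set, giving the "3 times out of 32"
 * deactivations and the remaining 28/32 cache-style placements.
 */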
/*
 * vm_page_grab()
 *
 *	Grab a page, blocking if it is busy and allocating a page if necessary.
 *	A busy page is returned or NULL.
 *
 *	If VM_ALLOC_RETRY is specified VM_ALLOC_NORMAL must also be specified.
 *	If VM_ALLOC_RETRY is not specified the call may return NULL.
 *
 *	This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
 *	always returned if we had blocked.
 *	This routine will never return NULL if VM_ALLOC_RETRY is set.
 *	This routine may not be called from an interrupt.
 *	The returned page may not be entirely valid.
 *
 *	This routine may be called from mainline code without spl protection and
 *	be guaranteed a busied page associated with the object at the specified
 *	index.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int generation;

	KKASSERT(allocflags &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
	crit_enter();
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			while ((object->generation == generation) &&
					(m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, 0, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					m = NULL;
					goto done;
				}
			}
			goto retrylookup;
		} else {
			vm_page_busy(m);
			goto done;
		}
	}
	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		vm_wait();
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			goto done;
		goto retrylookup;
	}
done:
	crit_exit();
	return(m);
}
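/*
 * Usage sketch (illustrative only): grab a busied page at a given
 * index, blocking until one is available:
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	... m is returned non-NULL and PG_BUSY; fill/validate it ...
 *	vm_page_wakeup(m);
 *
 * Without VM_ALLOC_RETRY the caller must be prepared for a NULL return.
 */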
/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
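/*
 * Worked example: with DEV_BSIZE = 512 (DEV_BSHIFT = 9) and a 4K page
 * there are 8 chunk bits.  base = 512, size = 1024 gives first_bit = 1
 * and last_bit = 2, so the result is (2 << 2) - (1 << 1) = 8 - 2 =
 * 0x06, i.e. bits 1 and 2 set: a contiguous run of 1s covering every
 * chunk the byte range touches.
 */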
/*
 * vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zero'd.
 *
 *	This routine may not block.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be cleared.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}
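/*
 * Example: with DEV_BSIZE = 512, a call with base = 100 and size = 200
 * touches only chunk 0.  Since base is unaligned, if chunk 0's valid
 * bit is clear bytes 0-99 are zeroed; since base + size = 300 is also
 * unaligned, bytes 300-511 are zeroed as well.  Chunk 0 is then marked
 * valid and clean, so a later pageout sees a consistent 512-byte block.
 */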
/*
 * vm_page_clear_dirty:
 *
 *	May not block.
 */
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}
/*
 * vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 *
 *	May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}
/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zerod.  Invalid sub-DEV_BSIZE'd areas ( where the
	 * valid bit may be set ) have already been zerod by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}
/*
 * vm_page_is_valid:
 *
 *	Is a (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is entirely
 *	invalid, and TRUE otherwise.
 *
 *	May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}
/*
 * update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}
1665 #include "opt_ddb.h"
1667 #include <sys/kernel.h>
1669 #include <ddb/ddb.h>
1671 DB_SHOW_COMMAND(page
, vm_page_print_page_info
)
1673 db_printf("vmstats.v_free_count: %d\n", vmstats
.v_free_count
);
1674 db_printf("vmstats.v_cache_count: %d\n", vmstats
.v_cache_count
);
1675 db_printf("vmstats.v_inactive_count: %d\n", vmstats
.v_inactive_count
);
1676 db_printf("vmstats.v_active_count: %d\n", vmstats
.v_active_count
);
1677 db_printf("vmstats.v_wire_count: %d\n", vmstats
.v_wire_count
);
1678 db_printf("vmstats.v_free_reserved: %d\n", vmstats
.v_free_reserved
);
1679 db_printf("vmstats.v_free_min: %d\n", vmstats
.v_free_min
);
1680 db_printf("vmstats.v_free_target: %d\n", vmstats
.v_free_target
);
1681 db_printf("vmstats.v_cache_min: %d\n", vmstats
.v_cache_min
);
1682 db_printf("vmstats.v_inactive_target: %d\n", vmstats
.v_inactive_target
);
1685 DB_SHOW_COMMAND(pageq
, vm_page_print_pageq_info
)
1688 db_printf("PQ_FREE:");
1689 for(i
=0;i
<PQ_L2_SIZE
;i
++) {
1690 db_printf(" %d", vm_page_queues
[PQ_FREE
+ i
].lcnt
);
1694 db_printf("PQ_CACHE:");
1695 for(i
=0;i
<PQ_L2_SIZE
;i
++) {
1696 db_printf(" %d", vm_page_queues
[PQ_CACHE
+ i
].lcnt
);
1700 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1701 vm_page_queues
[PQ_ACTIVE
].lcnt
,
1702 vm_page_queues
[PQ_INACTIVE
].lcnt
);