/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
 * $DragonFly: src/sys/vm/vm_fault.c,v 1.34 2007/01/01 22:51:18 corecode Exp $
 */
/*
 *	Page fault handling module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/vkernel.h>
#include <sys/sfbuf.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>
#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
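
/*
 * Note (added for clarity): with the defaults above, a single fault can
 * bring in a cluster of up to VM_FAULT_READ = 7 + 1 + 8 = 16 pages
 * (7 behind the faulting page, the faulting page itself, and 8 ahead).
 */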
struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_prot_t first_prot;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	int didlimit;
	int hardfault;
	int fault_flags;
	int map_generation;
	boolean_t wired;
	struct vnode *vp;
};
static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t);
static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
static int vm_fault_ratelimit(struct vmspace *);
static __inline void
release_page(struct faultstate *fs)
{
	vm_page_wakeup(fs->m);
	vm_page_deactivate(fs->m);
	fs->m = NULL;
}
static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry, 0);
		fs->lookup_still_valid = FALSE;
	}
}
/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
_cleanup_successful_fault(struct faultstate *fs, int relock)
{
	if (fs->object != fs->first_object) {
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->object);
		fs->first_m = NULL;
	}
	fs->object = fs->first_object;
	if (relock && fs->lookup_still_valid == FALSE) {
		vm_map_lock_read(fs->map);
		fs->lookup_still_valid = TRUE;
	}
}
static void
_unlock_things(struct faultstate *fs, int dealloc)
{
	vm_object_pip_wakeup(fs->first_object);
	_cleanup_successful_fault(fs, 0);
	if (dealloc) {
		vm_object_deallocate(fs->first_object);
	}
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)
#define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)	\
		(fs->object->type != OBJT_DEFAULT && \
		(((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
/*
 * vm_fault:
 *
 *	Handle a page fault occurring at the given address, requiring the given
 *	permissions, in the map specified.  If successful, the page is inserted
 *	into the associated physical map.
 *
 *	NOTE: The given address should be truncated to the proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	The caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
	int result;
	vm_pindex_t first_pindex;
	struct faultstate fs;

	mycpu->gd_cnt.v_vm_faults++;

	fs.didlimit = 0;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
RetryFault:
	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry and/or
	 * creating a shadow object, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
	 * if the map entry is a virtual page table and also writable,
	 * so we can set the 'A' (accessed) bit in the virtual page table
	 * entry.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_object,
			       &first_pindex, &fs.first_prot, &fs.wired);
	/*
	 * If the lookup failed or the map protections are incompatible,
	 * the fault generally fails.  However, if the caller is trying
	 * to do a user wiring we have more work to do.
	 */
	if (result != KERN_SUCCESS) {
		if (result != KERN_PROTECTION_FAILURE)
			return (result);
		if ((fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
			return (result);

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections now, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
				       VM_PROT_READ|VM_PROT_WRITE|
				       VM_PROT_OVERRIDE_WRITE,
				       &fs.entry, &fs.first_object,
				       &first_pindex, &fs.first_prot,
				       &fs.wired);
		if (result != KERN_SUCCESS)
			return (result);

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
			fs.entry->max_protection &= ~VM_PROT_WRITE;
	}
	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		      (u_long)vaddr);
	}
	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_object == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
		      (void *)vaddr, fs.entry);
	}
	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	vm_object_reference(fs.first_object);
	fs.vp = vnode_pager_lock(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
	/*
	 * If the entry is wired we cannot change the page protection.
	 */
	if (fs.wired)
		fault_type = fs.first_prot;
	/*
	 * The page we want is at (first_object, first_pindex), but if the
	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
	 * page table to figure out the actual pindex.
	 *
	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
	 * ONLY
	 */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde);
		if (result == KERN_TRY_AGAIN)
			goto RetryFault;
		if (result != KERN_SUCCESS)
			return (result);
	}
	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->object
	 * will have an additional PIP count if it is not equal to
	 * fs->first_object.
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type);

	if (result == KERN_TRY_AGAIN)
		goto RetryFault;
	if (result != KERN_SUCCESS)
		return (result);
	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.
	 *
	 * Enter the page into the pmap and do pmap-related adjustments.
	 */
	unlock_things(&fs);
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);

	if (((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0) && (fs.wired == 0)) {
		pmap_prefault(fs.map->pmap, vaddr, fs.entry);
	}
	vm_page_flag_clear(fs.m, PG_ZERO);
	vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
	if (fs.fault_flags & VM_FAULT_HOLD)
		vm_page_hold(fs.m);
	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
		if (fs.wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
	}
	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}
	/*
	 * Unlock everything, and return
	 */
	vm_page_wakeup(fs.m);
	vm_object_deallocate(fs.first_object);

	return (KERN_SUCCESS);
}
/*
 * Translate the virtual page number (first_pindex) that is relative
 * to the address space into a logical page number that is relative to the
 * backing object.  Use the virtual page table pointed to by (vpte).
 *
 * This implements an N-level page table.  Any level can terminate the
 * scan by setting VPTE_PS.  A linear mapping is accomplished by setting
 * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
 */
static
int
vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex, vpte_t vpte)
{
	struct sf_buf *sf;
	int vshift = 32 - PAGE_SHIFT;	/* page index bits remaining */
	int result = KERN_SUCCESS;

	for (;;) {
		if ((vpte & VPTE_V) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((vpte & VPTE_PS) || vshift == 0)
			break;
		KKASSERT(vshift >= VPTE_PAGE_BITS);

		/*
		 * Get the page table page
		 */
		result = vm_fault_object(fs, vpte >> PAGE_SHIFT, VM_PROT_READ);
		if (result != KERN_SUCCESS)
			return (result);

		/*
		 * Process the returned fs.m and look up the page table
		 * entry in the page table page.
		 */
		vshift -= VPTE_PAGE_BITS;
		sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
		vpte = *((vpte_t *)sf_buf_kva(sf) +
			 ((*pindex >> vshift) & VPTE_PAGE_MASK));
		sf_buf_free(sf);
		vm_page_flag_set(fs->m, PG_REFERENCED);
		vm_page_activate(fs->m);
		vm_page_wakeup(fs->m);
		cleanup_successful_fault(fs);
	}

	/*
	 * Combine remaining address bits with the vpte.
	 */
	*pindex = (vpte >> PAGE_SHIFT) +
		  (*pindex & ((1 << vshift) - 1));
	return (KERN_SUCCESS);
}
/*
 * Do all operations required to fault-in (fs.first_object, pindex).  Run
 * through the shadow chain as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.m will contain a resolved, busied page, and fs.object
 * will have an additional PIP count if it is not equal to fs.first_object.
 */
static
int
vm_fault_object(struct faultstate *fs,
		vm_pindex_t first_pindex, vm_prot_t fault_type)
{
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ];
	vm_pindex_t pindex;
	int faultcount;

	fs->prot = fs->first_prot;
	fs->object = fs->first_object;
	pindex = first_pindex;

	for (;;) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs->object->flags & OBJ_DEAD) {
			unlock_and_deallocate(fs);
			return (KERN_PROTECTION_FAILURE);
		}
		/*
		 * See if page is resident.  spl protection is required
		 * to avoid an interrupt unbusy/free race against our
		 * lookup.  We must hold the protection through a page
		 * allocation or busy.
		 */
		crit_enter();
		fs->m = vm_page_lookup(fs->object, pindex);
		if (fs->m != NULL) {
			int queue;
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either PG_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts (and even pageins if
			 * it is the vnode pager), and we could end up trying
			 * to pagein and pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a vm_page_t->busy page except, perhaps,
			 * to pmap it.
			 */
			if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
				unlock_things(fs);
				vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
				mycpu->gd_cnt.v_intrans++;
				vm_object_deallocate(fs->first_object);
				crit_exit();
				return (KERN_TRY_AGAIN);
			}
			/*
			 * If reactivating a page from PQ_CACHE we may have
			 * to rate-limit.
			 */
			queue = fs->m->queue;
			vm_page_unqueue_nowakeup(fs->m);

			if ((queue - fs->m->pc) == PQ_CACHE &&
			    vm_page_count_severe()) {
				vm_page_activate(fs->m);
				unlock_and_deallocate(fs);
				crit_exit();
				vm_waitpfault();
				return (KERN_TRY_AGAIN);
			}
			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else we found the
			 * page and can return.
			 *
			 * We can release the spl once we have marked the
			 * page busy.
			 */
			vm_page_busy(fs->m);
			crit_exit();

			if (((fs->m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
			    fs->m->object != &kernel_object) {
				goto readrest;
			}
			break; /* break to PAGE HAS BEEN FOUND */
		}
		/*
		 * Page is not resident.  If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 *
		 * NOTE: We are still in a critical section.
		 */
		if (TRYPAGER(fs) || fs->object == fs->first_object) {
			/*
			 * If the page is beyond the object size we fail
			 */
			if (pindex >= fs->object->size) {
				crit_exit();
				unlock_and_deallocate(fs);
				return (KERN_PROTECTION_FAILURE);
			}
			/*
			 * Ratelimit.
			 */
			if (fs->didlimit == 0 && curproc != NULL) {
				int limticks;

				limticks = vm_fault_ratelimit(curproc->p_vmspace);
				if (limticks) {
					crit_exit();
					unlock_and_deallocate(fs);
					tsleep(curproc, 0, "vmrate", limticks);
					fs->didlimit = 1;
					return (KERN_TRY_AGAIN);
				}
			}
			/*
			 * Allocate a new page for this object/offset pair.
			 */
			fs->m = NULL;
			if (!vm_page_count_severe()) {
				fs->m = vm_page_alloc(fs->object, pindex,
				    (fs->vp || fs->object->backing_object) ?
					VM_ALLOC_NORMAL :
					VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
			}
			if (fs->m == NULL) {
				crit_exit();
				unlock_and_deallocate(fs);
				vm_waitpfault();
				return (KERN_TRY_AGAIN);
			}
		}
		crit_exit();
readrest:
		/*
		 * We have found a valid page or we have allocated a new page.
		 * The page thus may not be valid or may not be entirely
		 * valid.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 *
		 * We are NOT in splvm here and if TRYPAGER is true then
		 * fs.m will be non-NULL and will be PG_BUSY for us.
		 */
		if (TRYPAGER(fs)) {
			int rv;
			int reqpage;
			int ahead, behind;
			u_char behavior = vm_map_entry_behavior(fs->entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				ahead = 0;
				behind = 0;
			} else {
				behind = pindex;
				if (behind > VM_FAULT_READ_BEHIND)
					behind = VM_FAULT_READ_BEHIND;

				ahead = fs->object->size - pindex;
				ahead--;
				if (ahead > VM_FAULT_READ_AHEAD)
					ahead = VM_FAULT_READ_AHEAD;
			}
			if ((fs->first_object->type != OBJT_DEVICE) &&
			    (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
			     (behavior != MAP_ENTRY_BEHAV_RANDOM &&
			      pindex >= fs->entry->lastr &&
			      pindex < fs->entry->lastr + VM_FAULT_READ))
			) {
				vm_pindex_t firstpindex, tmppindex;

				if (first_pindex < 2 * VM_FAULT_READ)
					firstpindex = 0;
				else
					firstpindex = first_pindex - 2 * VM_FAULT_READ;

				/*
				 * note: partially valid pages cannot be
				 * included in the lookahead - NFS piecemeal
				 * writes will barf on it badly.
				 *
				 * spl protection is required to avoid races
				 * between the lookup and an interrupt
				 * unbusy/free sequence occurring prior to
				 * our busy check.
				 */
				crit_enter();
				for (tmppindex = first_pindex - 1;
				     tmppindex >= firstpindex;
				     --tmppindex
				) {
					vm_page_t mt;

					mt = vm_page_lookup(fs->first_object, tmppindex);
					if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
						break;
					if (mt->busy ||
					    (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
					    mt->hold_count ||
					    mt->wire_count)
						continue;
					if (mt->dirty == 0)
						vm_page_test_dirty(mt);
					if (mt->dirty) {
						vm_page_protect(mt, VM_PROT_NONE);
						vm_page_deactivate(mt);
					} else {
						vm_page_cache(mt);
					}
				}
				crit_exit();

				ahead += behind;
				behind = 0;
			}
			/*
			 * now we find out if any other pages should be paged
			 * in at this time.  This routine checks to see if the
			 * pages surrounding this fault reside in the same
			 * object as the page for this fault.  If they do,
			 * then they are faulted in also into the object.  The
			 * array "marray" returned contains an array of
			 * vm_page_t structs where one of them is the
			 * vm_page_t passed to the routine.  The reqpage
			 * return value is the index into the marray for the
			 * vm_page_t passed to the routine.
			 *
			 * fs.m plus the additional pages are PG_BUSY'd.
			 */
			faultcount = vm_fault_additional_pages(
			    fs->m, behind, ahead, marray, &reqpage);

			/*
			 * update lastr imperfectly (we do not know how much
			 * getpages will actually read), but good enough.
			 */
			fs->entry->lastr = pindex + faultcount - behind;
			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.  We hold a ref on
			 * fs.object and the pages are PG_BUSY'd.
			 */
			unlock_map(fs);

			rv = vm_pager_get_pages(fs->object, marray,
						faultcount, reqpage);

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page.  Leave it busy while we play
				 * with it.
				 *
				 * Relookup in case pager changed page.  Pager
				 * is responsible for disposition of old page
				 * if moved.
				 *
				 * XXX other code segments do relookups too.
				 * It's a bad abstraction that needs to be
				 * fixed.
				 */
				fs->m = vm_page_lookup(fs->object, pindex);
				if (fs->m == NULL) {
					unlock_and_deallocate(fs);
					return (KERN_TRY_AGAIN);
				}

				fs->hardfault++;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */
			if (rv == VM_PAGER_ERROR) {
				if (curproc) {
					kprintf("vm_fault: pager read error, "
						"pid %d (%s)\n",
						curproc->p_pid, curproc->p_comm);
				} else {
					kprintf("vm_fault: pager read error, "
						"thread %p (%s)\n",
						curthread, curthread->td_comm);
				}
			}
			/*
			 * Data outside the range of the pager or an I/O error
			 *
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs->map != &kernel_map) && (rv == VM_PAGER_ERROR)) ||
			    (rv == VM_PAGER_BAD)) {
				vnode_pager_freepage(fs->m);
				fs->m = NULL;
				unlock_and_deallocate(fs);
				if (rv == VM_PAGER_ERROR)
					return (KERN_FAILURE);
				else
					return (KERN_PROTECTION_FAILURE);
				/* NOT REACHED */
			}
			if (fs->object != fs->first_object) {
				vnode_pager_freepage(fs->m);
				fs->m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}
		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs->object == fs->first_object)
			fs->first_m = fs->m;
		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		pindex += OFF_TO_IDX(fs->object->backing_object_offset);
		next_object = fs->object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs->object != fs->first_object) {
				vm_object_pip_wakeup(fs->object);

				fs->object = fs->first_object;
				pindex = first_pindex;
				fs->m = fs->first_m;
			}
			fs->first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs->m->flags & PG_ZERO) == 0) {
				vm_page_zero_fill(fs->m);
			} else {
				mycpu->gd_cnt.v_ozfod++;
			}
			mycpu->gd_cnt.v_zfod++;
			fs->m->valid = VM_PAGE_BITS_ALL;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			if (fs->object != fs->first_object) {
				vm_object_pip_wakeup(fs->object);
			}
			KASSERT(fs->object != next_object,
				("object loop %p", next_object));
			fs->object = next_object;
			vm_object_pip_add(fs->object, 1);
		}
	}

	KASSERT((fs->m->flags & PG_BUSY) != 0,
		("vm_fault: not busy after main loop"));
	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 *
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs->object != fs->first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			if (fs->map_generation == fs->map->timestamp &&
				/*
				 * Only one shadow object
				 */
				(fs->object->shadow_count == 1) &&
				/*
				 * No COW refs, except us
				 */
				(fs->object->ref_count == 1) &&
				/*
				 * No one else can look this object up
				 */
				(fs->object->handle == NULL) &&
				/*
				 * No other ways to look the object up
				 */
				((fs->object->type == OBJT_DEFAULT) ||
				 (fs->object->type == OBJT_SWAP)) &&
				/*
				 * We don't chase down the shadow chain
				 */
				(fs->object == fs->first_object->backing_object) &&
				/*
				 * grab the lock if we need to
				 */
				(fs->lookup_still_valid ||
				 lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
			) {
				fs->lookup_still_valid = 1;
				/*
				 * get rid of the unnecessary page
				 */
				vm_page_protect(fs->first_m, VM_PROT_NONE);
				vm_page_free(fs->first_m);
				fs->first_m = NULL;

				/*
				 * grab the page and put it into the
				 * process's object.  The page is
				 * automatically made dirty.
				 */
				vm_page_rename(fs->m, fs->first_object, first_pindex);
				fs->first_m = fs->m;
				vm_page_busy(fs->first_m);
				fs->m = NULL;
				mycpu->gd_cnt.v_cow_optim++;
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				vm_page_copy(fs->m, fs->first_m);
			}

			if (fs->m) {
				/*
				 * We no longer need the old page or object.
				 */
				release_page(fs);
			}

			/*
			 * fs->object != fs->first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs->object);

			/*
			 * Only use the new page below...
			 */
			mycpu->gd_cnt.v_cow_faults++;
			fs->m = fs->first_m;
			fs->object = fs->first_object;
			pindex = first_pindex;
		} else {
			/*
			 * If it wasn't a write fault avoid having to copy
			 * the page by mapping it read-only.
			 */
			fs->prot &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * We may have had to unlock a map to do I/O.  If we did then
	 * lookup_still_valid will be FALSE.  If the map generation count
	 * also changed then all sorts of things could have happened while
	 * we were doing the I/O and we need to retry.
	 */
	if (!fs->lookup_still_valid &&
	    (fs->map->timestamp != fs->map_generation)) {
		release_page(fs);
		unlock_and_deallocate(fs);
		return (KERN_TRY_AGAIN);
	}
	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter may cause other faults.  We don't put the page
	 * back on the active queue until later so that the page-out daemon
	 * won't find us (yet).
	 */
	if (fs->prot & VM_PROT_WRITE) {
		vm_page_flag_set(fs->m, PG_WRITEABLE);
		vm_object_set_writeable_dirty(fs->m->object);

		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW so dirty it explicitly to save on
		 * pmap_is_modified() calls later.
		 *
		 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 *
		 * Also tell the backing pager, if any, that it should remove
		 * any swap backing since the page is now dirty.
		 */
		if (fs->entry->eflags & MAP_ENTRY_NOSYNC) {
			if (fs->m->dirty == 0)
				vm_page_flag_set(fs->m, PG_NOSYNC);
		} else {
			vm_page_flag_clear(fs->m, PG_NOSYNC);
		}
		if (fs->fault_flags & VM_FAULT_DIRTY) {
			crit_enter();
			vm_page_dirty(fs->m);
			vm_pager_page_unswapped(fs->m);
			crit_exit();
		}
	}
	/*
	 * Page had better still be busy.  We are still locked up and
	 * fs->object will have another PIP reference if it is not equal
	 * to fs->first_object.
	 */
	KASSERT(fs->m->flags & PG_BUSY,
		("vm_fault: page %p not busy!", fs->m));

	/*
	 * Sanity check: page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	if (fs->m->valid != VM_PAGE_BITS_ALL) {
		vm_page_zero_invalid(fs->m, TRUE);
		kprintf("Warning: page %p partially invalid on fault\n", fs->m);
	}

	return (KERN_SUCCESS);
}
/*
 * quick version of vm_fault
 */
int
vm_fault_quick(caddr_t v, int prot)
{
	int r;

	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}
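
/*
 * Usage note (added, not from the original source): fubyte() fetches a
 * byte from user space and subyte() stores one back, so the write case
 * rewrites the byte it just read, forcing a write fault to be resolved
 * through the normal vm_fault() path if necessary.  A caller sketch:
 *
 *	if (vm_fault_quick((caddr_t)uaddr, VM_PROT_READ) < 0)
 *		return (EFAULT);
 */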
/*
 * Wire down a range of virtual addresses in a map.  The entry in question
 * should be marked in-transition and the map must be locked.  We must
 * release the map temporarily while faulting-in the page to avoid a
 * deadlock.  Note that the entry may be clipped while we are blocked but
 * will never be freed.
 */
int
vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
{
	boolean_t fictitious;
	vm_offset_t start;
	vm_offset_t end;
	vm_offset_t va;
	vm_paddr_t pa;
	pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);
	start = entry->start;
	end = entry->end;
	fictitious = entry->object.vm_object &&
			(entry->object.vm_object->type == OBJT_DEVICE);

	vm_map_unlock(map);
	map->timestamp++;

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		if (user_wire) {
			rv = vm_fault(map, va, VM_PROT_READ,
				      VM_FAULT_USER_WIRE);
		} else {
			rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
				      VM_FAULT_CHANGE_WIRING);
		}
		if (rv) {
			while (va > start) {
				va -= PAGE_SIZE;
				if ((pa = pmap_extract(pmap, va)) == 0)
					continue;
				pmap_change_wiring(pmap, va, FALSE);
				if (!fictitious)
					vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
			}
			vm_map_lock(map);
			return (rv);
		}
	}
	vm_map_lock(map);
	return (KERN_SUCCESS);
}
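
/*
 * Note (an assumption, not from the original file): vm_fault_wire()
 * above and vm_fault_unwire() below are expected to be driven by the
 * map wiring code (e.g. vm_map_wire()/vm_map_unwire()), which marks
 * the entry in-transition, calls here, and then clears the
 * in-transition flag again.
 */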
/*
 * Unwire a range of virtual addresses in a map.  The map should be
 * locked.
 */
void
vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
{
	boolean_t fictitious;
	vm_offset_t start;
	vm_offset_t end;
	vm_offset_t va;
	vm_paddr_t pa;
	pmap_t pmap;

	pmap = vm_map_pmap(map);
	start = entry->start;
	end = entry->end;
	fictitious = entry->object.vm_object &&
			(entry->object.vm_object->type == OBJT_DEVICE);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != 0) {
			pmap_change_wiring(pmap, va, FALSE);
			if (!fictitious)
				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
		}
	}
}
/*
 * Reduce the rate at which memory is allocated to a process based
 * on the perceived load on the VM system.  As the load increases
 * the allocation burst rate goes down and the delay increases.
 *
 * Rate limiting does not apply when faulting active or inactive
 * pages.  When faulting 'cache' pages, rate limiting only applies
 * if the system currently has a severe page deficit.
 *
 * XXX vm_pagesupply should be increased when a page is freed.
 *
 * We sleep up to 1/10 of a second.
 */
static int
vm_fault_ratelimit(struct vmspace *vmspace)
{
	if (vm_load_enable == 0)
		return(0);
	if (vmspace->vm_pagesupply > 0) {
		--vmspace->vm_pagesupply;
		return(0);
	}
	if (vm_load_debug) {
		kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
			vm_load,
			(1000 - vm_load) / 10, vm_load * hz / 10000,
			curproc->p_pid, curproc->p_comm);
	}
	vmspace->vm_pagesupply = (1000 - vm_load) / 10;
	return(vm_load * hz / 10000);
}
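
/*
 * Worked example (illustrative, assuming vm_load runs 0..1000 as the
 * formulas imply): with vm_load = 500 and hz = 100, a process that has
 * exhausted its page supply is granted a burst of (1000 - 500) / 10 =
 * 50 pages and told to sleep 500 * 100 / 10000 = 5 ticks; at
 * vm_load = 1000 the burst drops to 0 pages and the sleep reaches the
 * full hz / 10 (1/10 of a second).
 */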
/*
 *	vm_fault_copy_entry:
 *
 *	Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
		    vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
{
	vm_object_t dst_object;
	vm_object_t src_object;
	vm_ooffset_t dst_offset;
	vm_ooffset_t src_offset;
	vm_prot_t prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	vm_map_entry_allocate_object(dst_entry);
	dst_object = dst_entry->object.vm_object;

	prot = dst_entry->max_protection;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 * Allocate a page in the destination object
		 */
		do {
			dst_m = vm_page_alloc(dst_object,
				OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				vm_wait();
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		src_m = vm_page_lookup(src_object,
				       OFF_TO_IDX(dst_offset + src_offset));
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 * Enter it in the pmap...
		 */
		vm_page_flag_clear(dst_m, PG_ZERO);
		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
		vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		vm_page_activate(dst_m);
		vm_page_wakeup(dst_m);
	}
}
/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  This routine brackets the viable
 * pages for the pages to be paged in.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *	marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *	number of pages in marray
 */
static int
vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
			  vm_page_t *marray, int *reqpage)
{
	int i, j;
	vm_object_t object;
	vm_pindex_t pindex, startpindex, endpindex, tpindex;
	vm_page_t rtm;
	int cbehind, cahead;

	object = m->object;
	pindex = m->pindex;

	/*
	 * we don't fault-ahead for device pager
	 */
	if (object->type == OBJT_DEVICE) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	/*
	 * if the requested page is not available, then give up now
	 */
	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
		return 0;
	}

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}

	/*
	 * try to do any readahead that we might have free pages for.
	 */
	if ((rahead + rbehind) >
	    ((vmstats.v_free_count + vmstats.v_cache_count) -
	     vmstats.v_free_reserved)) {
		pagedaemon_wakeup();
		marray[0] = m;
		*reqpage = 0;
		return 1;
	}

	/*
	 * scan backward for the read behind pages -- in memory
	 *
	 * Assume that if the page is not found an interrupt will not
	 * create it.  Theoretically interrupts can only remove (busy)
	 * pages, not create new associations.
	 */
	if (pindex > 0) {
		if (rbehind > pindex) {
			rbehind = pindex;
			startpindex = 0;
		} else {
			startpindex = pindex - rbehind;
		}

		for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
			if (vm_page_lookup(object, tpindex)) {
				startpindex = tpindex + 1;
				break;
			}
			if (tpindex == 0)
				break;
		}

		for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (rtm == NULL) {
				for (j = 0; j < i; j++) {
					vm_page_free(marray[j]);
				}
				marray[0] = m;
				*reqpage = 0;
				return 1;
			}

			marray[i] = rtm;
		}
	} else {
		startpindex = 0;
		i = 0;
	}

	marray[i] = m;
	/* page offset of the required page */
	*reqpage = i;

	tpindex = pindex + 1;
	i++;

	/*
	 * scan forward for the read ahead pages
	 */
	endpindex = tpindex + rahead;
	if (endpindex > object->size)
		endpindex = object->size;

	for ( ; tpindex < endpindex; i++, tpindex++) {
		if (vm_page_lookup(object, tpindex)) {
			break;
		}

		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
		if (rtm == NULL) {
			break;
		}

		marray[i] = rtm;
	}

	/* return number of pages */
	return i;
}
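
/*
 * Illustrative example (not from the original source): for a fault at
 * pindex 10 with rbehind = 2 and rahead = 3, no neighboring pages
 * already resident, and a sufficiently large object, marray receives
 * the pages at pindexes { 8, 9, 10, 11, 12, 13 }, *reqpage is set to 2
 * (the index of the faulting page within marray), and 6 is returned.
 */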