/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *clearobjflags,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *clearobjflags);
static void	vm_object_qcollapse(vm_object_t object);
static void	vm_object_vndeallocate(vm_object_t object);
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 */
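
/*
 * Illustrative sketch (not part of the original source): the typical
 * lifecycle of an anonymous object as seen through the routines defined
 * below.  The calls exist in this file; the surrounding code and the
 * "len" byte count are hypothetical.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, atop(len));
 *	vm_object_reference(obj);	-- take a second reference
 *	...
 *	vm_object_deallocate(obj);	-- drop the extra reference
 *	vm_object_deallocate(obj);	-- last reference: object is terminated
 */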
struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
    "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);
#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations", object));
#endif
	KASSERT(vm_object_cache_is_empty(object),
	    ("object %p has cached pages", object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
}
#endif
static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	object->ref_count = 0;
	object->rtree.rt_root = 0;
	object->rtree.rt_flags = 0;
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->cache.rt_root = 0;
	object->cache.rt_flags = 0;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}
static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	switch (type) {
	case OBJT_DEAD:
		panic("_vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_DEFAULT:
	case OBJT_SWAP:
		object->flags = OBJ_ONEMAPPING;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		object->flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		object->flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		object->flags = 0;
		break;
	default:
		panic("_vm_object_allocate: type %d is undefined", type);
	}
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}
/*
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{

	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	rw_init(&kmem_object->lock, "kmem vm object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}
/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_MGTDEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	default:
		panic("vm_object_set_memattr: object %p is of undefined type",
		    object);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}
void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
	}
}
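
/*
 * Illustrative sketch (not part of the original source): how a pager I/O
 * path typically brackets its work with the paging_in_progress helpers
 * above.  The caller shown here is hypothetical.
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_pip_add(object, 1);
 *	VM_OBJECT_WUNLOCK(object);
 *	... issue and wait for pager I/O on the object's pages ...
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_pip_wakeup(object);	-- may wake a vm_object_pip_wait()er
 *	VM_OBJECT_WUNLOCK(object);
 */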
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}
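
/*
 * Illustrative sketch (not part of the original source): creating an
 * anonymous object sized to hold "len" bytes; "len" is a hypothetical
 * byte count.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(len)));
 */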
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{

	if (object == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}
/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}

	if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
		umtx_shm_object_terminated(object);

	/*
	 * The test for text of vp vnode does not need a bypass to
	 * reach right VV_TEXT there, since it is obtained from
	 * object->handle.
	 */
	if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) {
		object->ref_count--;
		VM_OBJECT_WUNLOCK(object);
		/* vrele may need the vnode lock. */
		vrele(vp);
	} else {
		vhold(vp);
		VM_OBJECT_WUNLOCK(object);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vdrop(vp);
		VM_OBJECT_WLOCK(object);
		object->ref_count--;
		if (object->type == OBJT_DEAD) {
			VM_OBJECT_WUNLOCK(object);
			VOP_UNLOCK(vp, 0);
		} else {
			if (object->ref_count == 0)
				VOP_UNSET_TEXT(vp);
			VM_OBJECT_WUNLOCK(object);
			vput(vp);
		}
	}
}
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	struct vnode *vp;

	while (object != NULL) {
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_WUNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS) != 0) {
				vp = object->un_pager.swp.swp_tmpfs;
				vhold(vp);
				VM_OBJECT_WUNLOCK(object);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				if (object->type == OBJT_DEAD ||
				    object->ref_count != 1) {
					VM_OBJECT_WUNLOCK(object);
					VOP_UNLOCK(vp, 0);
					vdrop(vp);
					return;
				}
				if ((object->flags & OBJ_TMPFS) != 0)
					VOP_UNSET_TEXT(vp);
				VOP_UNLOCK(vp, 0);
				vdrop(vp);
			}
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS_NODE) == 0))) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
				    ("shadowed tmpfs v_object %p", object));
				if (!VM_OBJECT_TRYWLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_WUNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_WUNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_WUNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						VM_OBJECT_SLEEP(object, object,
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_WLOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_WUNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_WUNLOCK(object);
					continue;
				}
				VM_OBJECT_WUNLOCK(robject);
			}
			VM_OBJECT_WUNLOCK(object);
			return;
		}
doterm:
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			VM_OBJECT_WLOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			VM_OBJECT_WUNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_WUNLOCK(object);
		object = temp;
	}
}
/*
 * vm_object_destroy removes the object from the global object list
 * and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}
/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p, p_next;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to any empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		/*
		 * Optimize the page's removal from the object by resetting
		 * its "object" field.  Specifically, if the page is not
		 * wired, then the effect of this assignment is that
		 * vm_page_free()'s call to vm_page_remove() will return
		 * immediately without modifying the page or the object.
		 */
		p->object = NULL;
		if (p->wire_count == 0) {
			vm_page_free(p);
			PCPU_INC(cnt.v_pfree);
		}
	}

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif
	if (__predict_false(!vm_object_cache_is_empty(object)))
		vm_page_cache_free(object, 0, 0);

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}
/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
{

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
		*clearobjflags = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}
/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t clearobjflags, eio, res;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
	 * objects.  The check below prevents the function from
	 * operating on non-vnode objects.
	 */
	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
	    object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
	clearobjflags = tstart == 0 && tend >= object->size;
	res = TRUE;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (p->valid == 0)
			continue;
		if (vm_page_sleep_if_busy(p, "vpcwai")) {
			if (object->generation != curgeneration) {
				if ((flags & OBJPC_SYNC) != 0)
					goto rescan;
				else
					clearobjflags = FALSE;
			}
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
			continue;

		n = vm_object_page_collect_flush(object, p, pagerflags,
		    flags, &clearobjflags, &eio);
		if (eio) {
			res = FALSE;
			clearobjflags = FALSE;
		}
		if (object->generation != curgeneration) {
			if ((flags & OBJPC_SYNC) != 0)
				goto rescan;
			else
				clearobjflags = FALSE;
		}

		/*
		 * If the VOP_PUTPAGES() did a truncated write, so
		 * that even the first page of the run is not fully
		 * written, vm_pageout_flush() returns 0 as the run
		 * length.  Since the condition that caused truncated
		 * write may be permanent, e.g. exhausted free space,
		 * accepting n == 0 would cause an infinite loop.
		 *
		 * Forwarding the iterator leaves the unwritten page
		 * behind, but there is not much we can do there if
		 * filesystem refuses to write it.
		 */
		if (n == 0) {
			n = 1;
			clearobjflags = FALSE;
		}
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	if (clearobjflags)
		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
	return (res);
}
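
/*
 * Illustrative sketch (not part of the original source): how a caller on
 * the msync()/fsync path might drive the routine above; "off" and "len"
 * are hypothetical byte offsets into the vnode object.
 *
 *	boolean_t ok;
 *
 *	VM_OBJECT_WLOCK(object);
 *	ok = vm_object_page_clean(object, off, off + len, OBJPC_SYNC);
 *	VM_OBJECT_WUNLOCK(object);
 */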
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *clearobjflags, boolean_t *eio)
{
	vm_page_t ma[vm_pageout_page_count], p_first, tp;
	int count, i, mreq, runlen;

	vm_page_lock_assert(p, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	count = 1;
	mreq = 0;

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
	}

	for (p_first = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(p_first);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
		p_first = tp;
		mreq++;
	}

	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
		ma[i] = tp;

	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
	return (runlen);
}
/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int error, flags, fsync_after;
	boolean_t res;

	if (object == NULL)
		return (TRUE);
	res = TRUE;
	error = 0;
	VM_OBJECT_WLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_WLOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_WUNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (syncio && !invalidate && offset == 0 &&
		    OFF_TO_IDX(size) == object->size) {
			/*
			 * If syncing the whole mapping of the file,
			 * it is faster to schedule all the writes in
			 * async mode, also allowing the clustering,
			 * and then wait for i/o to complete.
			 */
			flags = 0;
			fsync_after = TRUE;
		} else {
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
			fsync_after = FALSE;
		}
		VM_OBJECT_WLOCK(object);
		res = vm_object_page_clean(object, offset, offset + size,
		    flags);
		VM_OBJECT_WUNLOCK(object);
		if (fsync_after)
			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error != 0)
			res = FALSE;
		VM_OBJECT_WLOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_WUNLOCK(object);
	return (res);
}
/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
    int advise)
{
	vm_pindex_t tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			    tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		} else if ((tobject->flags & OBJ_UNMANAGED) != 0)
			goto unlock_tobject;
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL && advise == MADV_WILLNEED) {
			/*
			 * If the page is cached, reactivate it.
			 */
			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
			    VM_ALLOC_NOBUSY);
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * Next object.
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_WLOCK(backing_object);
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object)
				VM_OBJECT_WUNLOCK(tobject);
			tobject = backing_object;
			goto shadowlookup;
		} else if (m->valid != VM_PAGE_BITS_ALL)
			goto unlock_tobject;
		/*
		 * If the page is not in a normal state, skip it.
		 */
		if (m->hold_count != 0 || m->wire_count != 0) {
			goto unlock_tobject;
		}
		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_madvise: page %p is fictitious", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("vm_object_madvise: page %p is not managed", m));
		if (vm_page_busied(m)) {
			if (advise == MADV_WILLNEED) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(m, PGA_REFERENCED);
			}
			if (object != tobject)
				VM_OBJECT_WUNLOCK(object);
			VM_OBJECT_WUNLOCK(tobject);
			vm_page_busy_sleep(m, "madvpo");
			VM_OBJECT_WLOCK(object);
			goto relookup;
		}
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else {
			vm_page_advise(m, advise);
		}
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
		if (tobject != object)
			VM_OBJECT_WUNLOCK(tobject);
	}
	VM_OBJECT_WUNLOCK(object);
}
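
/*
 * Illustrative sketch (not part of the original source): applying
 * MADV_FREE to a range of an anonymous object; "start" and "end" are
 * hypothetical page indices within the object.
 *
 *	vm_object_madvise(object, start, end, MADV_FREE);
 */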
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		    source->type == OBJT_SWAP)) {
			VM_OBJECT_WUNLOCK(source);
			return;
		}
		VM_OBJECT_WUNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, atop(length));

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
#if VM_NRESERVLEVEL > 0
		result->flags |= source->flags & OBJ_COLORED;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
		    ((1 << (VM_NFREEORDER - 1)) - 1);
#endif
		VM_OBJECT_WUNLOCK(source);
	}

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}
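
/*
 * Illustrative sketch (not part of the original source): how a map layer
 * might shadow a copy-on-write entry using the routine above; "entry" is
 * a hypothetical vm_map_entry_t.
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *	    entry->end - entry->start);
 */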
/*
 * vm_object_split:
 *
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_WUNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_WLOCK(new_object);
	VM_OBJECT_WLOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		if ((source->flags & OBJ_DEAD) != 0) {
			VM_OBJECT_WUNLOCK(source);
			VM_OBJECT_WUNLOCK(orig_object);
			VM_OBJECT_WUNLOCK(new_object);
			vm_object_deallocate(new_object);
			VM_OBJECT_WLOCK(orig_object);
			return;
		}
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		source->shadow_count++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(source);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	if (orig_object->cred != NULL) {
		new_object->cred = orig_object->cred;
		crhold(orig_object->cred);
		new_object->charge = ptoa(size);
		KASSERT(orig_object->charge >= ptoa(size),
		    ("orig_object->charge < 0"));
		orig_object->charge -= ptoa(size);
	}
retry:
	m = vm_page_find_least(orig_object, offidxstart);
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if (vm_page_busied(m)) {
			VM_OBJECT_WUNLOCK(new_object);
			VM_OBJECT_WUNLOCK(orig_object);
			vm_page_busy_sleep(m, "spltwt");
			VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}

		/* vm_page_rename() will handle dirty and cache. */
		if (vm_page_rename(m, new_object, idx)) {
			VM_OBJECT_WUNLOCK(new_object);
			VM_OBJECT_WUNLOCK(orig_object);
			VM_WAIT;
			VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}
#if VM_NRESERVLEVEL > 0
		/*
		 * If some of the reservation's allocated pages remain with
		 * the original object, then transferring the reservation to
		 * the new object is neither particularly beneficial nor
		 * particularly harmful as compared to leaving the reservation
		 * with the original object.  If, however, all of the
		 * reservation's allocated pages are transferred to the new
		 * object, then transferring the reservation is typically
		 * beneficial.  Determining which of these two cases applies
		 * would be more costly than unconditionally renaming the
		 * reservation.
		 */
		vm_reserv_rename(m, new_object, orig_object, offidxstart);
#endif
		if (orig_object->type == OBJT_SWAP)
			vm_page_xbusy(m);
	}
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		TAILQ_FOREACH(m, &new_object->memq, listq)
			vm_page_xunbusy(m);

		/*
		 * Transfer any cached pages from orig_object to new_object.
		 * If swap_pager_copy() found swapped out pages within the
		 * specified range of orig_object, then it changed
		 * new_object's type to OBJT_SWAP when it transferred those
		 * pages to new_object.  Otherwise, new_object's type
		 * should still be OBJT_DEFAULT and orig_object should not
		 * contain any cached pages within the specified range.
		 */
		if (__predict_false(!vm_object_cache_is_empty(orig_object)))
			vm_page_cache_transfer(orig_object, offidxstart,
			    new_object);
	}
	VM_OBJECT_WUNLOCK(orig_object);
	VM_OBJECT_WUNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_WLOCK(new_object);
}
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static vm_page_t
vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
    int op)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);
	backing_object = object->backing_object;
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
	KASSERT(p == NULL || p->object == object || p->object == backing_object,
	    ("invalid ownership %p %p %p", p, object, backing_object));
	if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
		return (next);
	VM_OBJECT_WUNLOCK(object);
	VM_OBJECT_WUNLOCK(backing_object);
	if (p == NULL)
		VM_WAIT;
	else
		vm_page_busy_sleep(p, "vmocol");
	VM_OBJECT_WLOCK(object);
	VM_OBJECT_WLOCK(backing_object);
	return (TAILQ_FIRST(&backing_object->memq));
}
static bool
vm_object_scan_all_shadowed(vm_object_t object)
{
	vm_object_t backing_object;
	vm_page_t p, pp;
	vm_pindex_t backing_offset_index, new_pindex;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;

	/*
	 * Initial conditions:
	 *
	 * We do not want to have to test for the existence of cache or swap
	 * pages in the backing object.  XXX but with the new swapper this
	 * would be pretty easy to do.
	 */
	if (backing_object->type != OBJT_DEFAULT)
		return (false);

	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL;
	    p = TAILQ_NEXT(p, listq)) {
		new_pindex = p->pindex - backing_offset_index;

		/*
		 * Ignore pages outside the parent object's range and outside
		 * the parent object's mapping of the backing object.
		 */
		if (p->pindex < backing_offset_index ||
		    new_pindex >= object->size)
			continue;

		/*
		 * See if the parent has the page or if the parent's object
		 * pager has the page.  If the parent has the page but the page
		 * is not valid, the parent's object pager must have the page.
		 *
		 * If this fails, the parent does not completely shadow the
		 * object and we might as well give up now.
		 */
		pp = vm_page_lookup(object, new_pindex);
		if ((pp == NULL || pp->valid == 0) &&
		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
			return (false);
	}
	return (true);
}
static void
vm_object_collapse_scan(vm_object_t object, int op)
{
	vm_object_t backing_object;
	vm_page_t next, p, pp;
	vm_pindex_t backing_offset_index, new_pindex;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if ((op & OBSC_COLLAPSE_WAIT) != 0)
		vm_object_set_flag(backing_object, OBJ_DEAD);

	/*
	 * Our scan
	 */
	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		new_pindex = p->pindex - backing_offset_index;

		/*
		 * Check for busy page
		 */
		if (vm_page_busied(p)) {
			next = vm_object_collapse_scan_wait(object, p, next, op);
			continue;
		}

		KASSERT(p->object == backing_object,
		    ("vm_object_collapse_scan: object mismatch"));

		if (p->pindex < backing_offset_index ||
		    new_pindex >= object->size) {
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object, p->pindex,
				    1);

			/*
			 * Page is out of the parent object's range, we can
			 * simply destroy it.
			 */
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (p->wire_count == 0)
				vm_page_free(p);
			else
				vm_page_remove(p);
			continue;
		}

		pp = vm_page_lookup(object, new_pindex);
		if (pp != NULL && vm_page_busied(pp)) {
			/*
			 * The page in the parent is busy and possibly not
			 * (yet) valid.  Until its state is finalized by the
			 * busy bit owner, we can't tell whether it shadows the
			 * original page.  Therefore, we must either skip it
			 * and the original (backing_object) page or wait for
			 * its state to be finalized.
			 *
			 * This is due to a race with vm_fault() where we must
			 * unbusy the original (backing_obj) page before we can
			 * (re)lock the parent.  Hence we can get here.
			 */
			next = vm_object_collapse_scan_wait(object, pp, next,
			    op);
			continue;
		}

		KASSERT(pp == NULL || pp->valid != 0,
		    ("unbusy invalid page %p", pp));

		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
		    NULL)) {
			/*
			 * The page already exists in the parent OR swap exists
			 * for this location in the parent.  Leave the parent's
			 * page alone.  Destroy the original page from the
			 * backing object.
			 */
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object, p->pindex,
				    1);
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (p->wire_count == 0)
				vm_page_free(p);
			else
				vm_page_remove(p);
			continue;
		}

		/*
		 * Page does not exist in parent, rename the page from the
		 * backing object to the main object.
		 *
		 * If the page was mapped to a process, it can remain mapped
		 * through the rename.  vm_page_rename() will handle dirty and
		 * cache.
		 */
		if (vm_page_rename(p, object, new_pindex)) {
			next = vm_object_collapse_scan_wait(object, NULL, next,
			    op);
			continue;
		}

		/* Use the old pindex to free the right page. */
		if (backing_object->type == OBJT_SWAP)
			swap_pager_freespace(backing_object,
			    new_pindex + backing_offset_index, 1);

#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(p, object, backing_object,
		    backing_offset_index);
#endif
	}
}
/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	if (backing_object->ref_count != 1)
		return;

	vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT);
}
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	vm_object_t backing_object, new_backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.
		 */
		VM_OBJECT_WLOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		    backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		    object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_WUNLOCK(backing_object);
			break;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			VM_OBJECT_WUNLOCK(backing_object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_collapse_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			vm_object_pip_add(object, 1);
			vm_object_pip_add(backing_object, 1);

			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT);

#if VM_NRESERVLEVEL > 0
			/*
			 * Break any reservations from backing_object.
			 */
			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
				vm_reserv_break_all(backing_object);
#endif

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 * Since swap_pager_copy() is being asked to
				 * destroy the source, it will change the
				 * backing_object's type to OBJT_DEFAULT.
				 */
				swap_pager_copy(backing_object, object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);

				/*
				 * Free any cached pages from backing_object.
				 */
				if (__predict_false(
				    !vm_object_cache_is_empty(backing_object)))
					vm_page_cache_free(backing_object, 0, 0);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			if (backing_object->backing_object) {
				VM_OBJECT_WLOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				VM_OBJECT_WUNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, (
			    "backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			vm_object_pip_wakeup(backing_object);
			backing_object->type = OBJT_DEAD;
			backing_object->ref_count = 0;
			VM_OBJECT_WUNLOCK(backing_object);
			vm_object_destroy(backing_object);

			vm_object_pip_wakeup(object);
			object_collapses++;
		} else {
			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (object->resident_page_count != object->size &&
			    !vm_object_scan_all_shadowed(object)) {
				VM_OBJECT_WUNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_WLOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object, shadow_list);
				new_backing_object->shadow_count++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_WUNLOCK(new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_WUNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}
/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.  However, if a page is
 *	wired for any reason other than the existence of a managed, wired
 *	mapping, then it may be invalidated but not removed from the object.
 *	Pages are specified by the given range ["start", "end") and the option
 *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
 *	extends from "start" to the end of the object.  If the option
 *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
 *	specified range are affected.  If the option OBJPR_NOTMAPPED is
 *	specified, then the pages within the specified range must have no
 *	mappings.  Otherwise, if this option is not specified, any mappings to
 *	the specified pages are removed before the pages are freed or
 *	invalidated.
 *
 *	In general, this operation should only be performed on objects that
 *	contain managed pages.  There are, however, two exceptions.  First, it
 *	is performed on the kernel and kmem objects by vm_map_entry_delete().
 *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
 *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
 *	not be specified and the option OBJPR_NOTMAPPED must be specified.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int options)
{
	vm_page_t p, next;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
	    ("vm_object_page_remove: illegal options for object %p", object));
	if (object->resident_page_count == 0)
		goto skipmemq;
	vm_object_pip_add(object, 1);
again:
	p = vm_page_find_least(object, start);

	/*
	 * Here, the variable "p" is either (1) the page with the least pindex
	 * greater than or equal to the parameter "start" or (2) NULL.
	 */
	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
		next = TAILQ_NEXT(p, listq);

		/*
		 * If the page is wired for any reason besides the existence
		 * of managed, wired mappings, then it cannot be freed.  For
		 * example, fictitious pages, which represent device memory,
		 * are inherently wired and cannot be freed.  They can,
		 * however, be invalidated if the option OBJPR_CLEANONLY is
		 * not specified.
		 */
		if (vm_page_xbusied(p)) {
			VM_OBJECT_WUNLOCK(object);
			vm_page_busy_sleep(p, "vmopax");
			VM_OBJECT_WLOCK(object);
			goto again;
		}
		if (p->wire_count != 0) {
			if ((options & OBJPR_NOTMAPPED) == 0)
				pmap_remove_all(p);
			if ((options & OBJPR_CLEANONLY) == 0) {
				p->valid = 0;
				vm_page_undirty(p);
			}
			continue;
		}
		if (vm_page_busied(p)) {
			VM_OBJECT_WUNLOCK(object);
			vm_page_busy_sleep(p, "vmopar");
			VM_OBJECT_WLOCK(object);
			goto again;
		}
		KASSERT((p->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_page_remove: page %p is fictitious", p));
		if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
			if ((options & OBJPR_NOTMAPPED) == 0)
				pmap_remove_write(p);
			if (p->dirty)
				continue;
		}
		if ((options & OBJPR_NOTMAPPED) == 0)
			pmap_remove_all(p);
		vm_page_free(p);
	}
	vm_object_pip_wakeup(object);
skipmemq:
	if (__predict_false(!vm_object_cache_is_empty(object)))
		vm_page_cache_free(object, start, end);
}
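
/*
 * Illustrative sketch (not part of the original source): discarding every
 * resident page of an object, as a map-entry teardown might; the caller
 * shown here is hypothetical.
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_page_remove(object, 0, 0, 0);	-- end == 0: whole object
 *	VM_OBJECT_WUNLOCK(object);
 */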
/*
 *	vm_object_page_noreuse:
 *
 *	For the given object, attempt to move the specified pages to
 *	the head of the inactive queue.  This bypasses regular LRU
 *	operation and allows the pages to be reused quickly under memory
 *	pressure.  If a page is wired for any reason, then it will not
 *	be queued.  Pages are specified by the range ["start", "end").
 *	As a special case, if "end" is zero, then the range extends from
 *	"start" to the end of the object.
 *
 *	This operation should only be performed on objects that
 *	contain non-fictitious, managed pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	struct mtx *mtx, *new_mtx;
	vm_page_t p, next;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
	    ("vm_object_page_noreuse: illegal object %p", object));
	if (object->resident_page_count == 0)
		return;
	p = vm_page_find_least(object, start);

	/*
	 * Here, the variable "p" is either (1) the page with the least pindex
	 * greater than or equal to the parameter "start" or (2) NULL.
	 */
	mtx = NULL;
	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
		next = TAILQ_NEXT(p, listq);

		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(p);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_deactivate_noreuse(p);
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}
/*
 *	Populate the specified range of the object with valid pages.  Returns
 *	TRUE if the range is successfully populated and FALSE otherwise.
 *
 *	Note: This function should be optimized to pass a larger array of
 *	pages to vm_pager_get_pages() before it is applied to a non-
 *	OBJT_DEVICE object.
 *
 *	The object must be locked.
 */
boolean_t
vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_ASSERT_WLOCKED(object);
	for (pindex = start; pindex < end; pindex++) {
		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				vm_page_free(m);
				break;
			}
		}
		/*
		 * Keep "m" busy because a subsequent iteration may unlock
		 * the object.
		 */
	}
	if (pindex > start) {
		m = vm_page_lookup(object, start);
		while (m != NULL && m->pindex < pindex) {
			vm_page_xunbusy(m);
			m = TAILQ_NEXT(m, listq);
		}
	}
	return (pindex == end);
}
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to the second object
 *		reserved	Indicator that extension region has
 *				swap accounted for
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	VM_OBJECT_WLOCK(prev_object);
	if ((prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) ||
	    (prev_object->flags & OBJ_TMPFS_NODE) != 0) {
		VM_OBJECT_WUNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if: . more than one reference . paged out . shadows
	 * another object . has a copy elsewhere (any of which mean that the
	 * pages not mapped to prev_entry may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		VM_OBJECT_WUNLOCK(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		VM_OBJECT_WUNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Account for the charge.
	 */
	if (prev_object->cred != NULL) {

		/*
		 * If prev_object was charged, then this mapping,
		 * although not charged now, may become writable
		 * later. Non-NULL cred in the object would prevent
		 * swap reservation during enabling of the write
		 * access, so reserve swap now. Failed reservation
		 * cause allocation of the separate object for the map
		 * entry, and swap reservation for this entry is
		 * managed in appropriate time.
		 */
		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
		    prev_object->cred)) {
			VM_OBJECT_WUNLOCK(prev_object);
			return (FALSE);
		}
		prev_object->charge += ptoa(next_size);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object, next_pindex, next_pindex +
		    next_size, 0);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
		if (prev_object->cred != NULL) {
			KASSERT(prev_object->charge >=
			    ptoa(prev_object->size - next_pindex),
			    ("object %p overcharged 1 %jx %jx", prev_object,
			    (uintmax_t)next_pindex, (uintmax_t)next_size));
			prev_object->charge -= ptoa(prev_object->size -
			    next_pindex);
		}
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	VM_OBJECT_WUNLOCK(prev_object);
	return (TRUE);
}
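
/*
 * Illustrative sketch (not part of the original source): growing a
 * mapping by folding the next "grow" bytes into prev_object; the sizes
 * and the caller are hypothetical.
 *
 *	if (vm_object_coalesce(prev_object, prev_offset, prev_size, grow,
 *	    FALSE)) {
 *		... the extended range can reuse prev_object ...
 *	}
 */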
void
vm_object_set_writeable_dirty(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type != OBJT_VNODE) {
		if ((object->flags & OBJ_TMPFS_NODE) != 0) {
			KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs"));
			vm_object_set_flag(object, OBJ_TMPFS_DIRTY);
		}
		return;
	}
	object->generation++;
	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
		return;
	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
}
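/*
 * Usage note (summary, not text from this file): callers invoke
 * vm_object_set_writeable_dirty() with the object write-locked when a
 * mapping of the object may start producing dirty pages; for vnode
 * objects the OBJ_MIGHTBEDIRTY flag is what later tells the syncer and
 * msync paths that the object is worth scanning for dirty pages.
 */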
/*
 *	vm_object_unwire:
 *
 *	For each page offset within the specified range of the given object,
 *	find the highest-level page in the shadow chain and unwire it.  A page
 *	must exist at every page offset, and the highest-level page must be
 *	wired.
 */
void
vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
    uint8_t queue)
{
	vm_object_t tobject;
	vm_page_t m, tm;
	vm_pindex_t end_pindex, pindex, tpindex;
	int depth, locked_depth;

	KASSERT((offset & PAGE_MASK) == 0,
	    ("vm_object_unwire: offset is not page aligned"));
	KASSERT((length & PAGE_MASK) == 0,
	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
	/* The wired count of a fictitious page never changes. */
	if ((object->flags & OBJ_FICTITIOUS) != 0)
		return;
	pindex = OFF_TO_IDX(offset);
	end_pindex = pindex + atop(length);
	locked_depth = 1;
	VM_OBJECT_RLOCK(object);
	m = vm_page_find_least(object, pindex);
	while (pindex < end_pindex) {
		if (m == NULL || pindex < m->pindex) {
			/*
			 * The first object in the shadow chain doesn't
			 * contain a page at the current index.  Therefore,
			 * the page must exist in a backing object.
			 */
			tobject = object;
			tpindex = pindex;
			depth = 0;
			do {
				tpindex +=
				    OFF_TO_IDX(tobject->backing_object_offset);
				tobject = tobject->backing_object;
				KASSERT(tobject != NULL,
				    ("vm_object_unwire: missing page"));
				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
					goto next_page;
				depth++;
				if (depth == locked_depth) {
					locked_depth++;
					VM_OBJECT_RLOCK(tobject);
				}
			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
			    NULL);
		} else {
			tm = m;
			m = TAILQ_NEXT(m, listq);
		}
		vm_page_lock(tm);
		vm_page_unwire(tm, queue);
		vm_page_unlock(tm);
next_page:
		pindex++;
	}
	/* Release the accumulated object locks. */
	for (depth = 0; depth < locked_depth; depth++) {
		tobject = object->backing_object;
		VM_OBJECT_RUNLOCK(object);
		object = tobject;
	}
}
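/*
 * Illustrative use (sketch, not from this file): when a wired map entry is
 * torn down, its pages can be released with something like
 *
 *	vm_object_unwire(entry->object.vm_object, entry->offset,
 *	    entry->end - entry->start, PQ_ACTIVE);
 *
 * which walks the shadow chain at every page offset and unwires the page
 * found at the highest level, re-enqueuing it on the given page queue.
 */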
struct vnode *
vm_object_vnode(vm_object_t object)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	if (object->type == OBJT_VNODE)
		return (object->handle);
	if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0)
		return (object->un_pager.swp.swp_tmpfs);
	return (NULL);
}
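/*
 * Usage sketch (not from this file): a caller that needs the vnode behind
 * a mapping can do
 *
 *	VM_OBJECT_RLOCK(object);
 *	vp = vm_object_vnode(object);
 *	if (vp != NULL)
 *		vref(vp);
 *	VM_OBJECT_RUNLOCK(object);
 *
 * taking a reference before the object lock is dropped and handling the
 * NULL result returned for anonymous (non-vnode, non-tmpfs) objects.
 */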
static int
sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_vmobject kvo;
	char *fullpath, *freepath;
	struct vnode *vp;
	struct vattr va;
	vm_object_t obj;
	vm_page_t m;
	int count, error;

	if (req->oldptr == NULL) {
		/*
		 * If an old buffer has not been provided, generate an
		 * estimate of the space needed for a subsequent call.
		 */
		mtx_lock(&vm_object_list_mtx);
		count = 0;
		TAILQ_FOREACH(obj, &vm_object_list, object_list) {
			if (obj->type == OBJT_DEAD)
				continue;
			count++;
		}
		mtx_unlock(&vm_object_list_mtx);
		return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
		    count * 11 / 10));
	}

	error = 0;

	/*
	 * VM objects are type stable and are never removed from the
	 * list once added.  This allows us to safely read obj->object_list
	 * after reacquiring the VM object lock.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(obj, &vm_object_list, object_list) {
		if (obj->type == OBJT_DEAD)
			continue;
		VM_OBJECT_RLOCK(obj);
		if (obj->type == OBJT_DEAD) {
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}
		mtx_unlock(&vm_object_list_mtx);
		kvo.kvo_size = ptoa(obj->size);
		kvo.kvo_resident = obj->resident_page_count;
		kvo.kvo_ref_count = obj->ref_count;
		kvo.kvo_shadow_count = obj->shadow_count;
		kvo.kvo_memattr = obj->memattr;
		kvo.kvo_active = 0;
		kvo.kvo_inactive = 0;
		TAILQ_FOREACH(m, &obj->memq, listq) {
			/*
			 * A page may belong to the object but be
			 * dequeued and set to PQ_NONE while the
			 * object lock is not held.  This makes the
			 * reads of m->queue below racy, and we do not
			 * count pages set to PQ_NONE.  However, this
			 * sysctl is only meant to give an
			 * approximation of the system anyway.
			 */
			if (m->queue == PQ_ACTIVE)
				kvo.kvo_active++;
			else if (m->queue == PQ_INACTIVE)
				kvo.kvo_inactive++;
		}

		kvo.kvo_vn_fileid = 0;
		kvo.kvo_vn_fsid = 0;
		freepath = NULL;
		fullpath = "";
		vp = NULL;
		switch (obj->type) {
		case OBJT_DEFAULT:
			kvo.kvo_type = KVME_TYPE_DEFAULT;
			break;
		case OBJT_VNODE:
			kvo.kvo_type = KVME_TYPE_VNODE;
			vp = obj->handle;
			vref(vp);
			break;
		case OBJT_SWAP:
			kvo.kvo_type = KVME_TYPE_SWAP;
			break;
		case OBJT_DEVICE:
			kvo.kvo_type = KVME_TYPE_DEVICE;
			break;
		case OBJT_PHYS:
			kvo.kvo_type = KVME_TYPE_PHYS;
			break;
		case OBJT_DEAD:
			kvo.kvo_type = KVME_TYPE_DEAD;
			break;
		case OBJT_SG:
			kvo.kvo_type = KVME_TYPE_SG;
			break;
		case OBJT_MGTDEVICE:
			kvo.kvo_type = KVME_TYPE_MGTDEVICE;
			break;
		default:
			kvo.kvo_type = KVME_TYPE_UNKNOWN;
			break;
		}
		VM_OBJECT_RUNLOCK(obj);
		if (vp != NULL) {
			vn_fullpath(curthread, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
				kvo.kvo_vn_fileid = va.va_fileid;
				kvo.kvo_vn_fsid = va.va_fsid;
			}
			vput(vp);
		}

		strlcpy(kvo.kvo_path, fullpath, sizeof(kvo.kvo_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		kvo.kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) +
		    strlen(kvo.kvo_path) + 1;
		kvo.kvo_structsize = roundup(kvo.kvo_structsize,
		    sizeof(uint64_t));
		error = SYSCTL_OUT(req, &kvo, kvo.kvo_structsize);
		mtx_lock(&vm_object_list_mtx);
		if (error)
			break;
	}
	mtx_unlock(&vm_object_list_mtx);
	return (error);
}

SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
    "List of VM objects");
#include "opt_ddb.h"

#include <sys/kernel.h>
#include <sys/cons.h>

#include <ddb/ddb.h>
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}
static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	return 0;
}
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}
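/*
 * DDB usage note (summary, not from this file): the command above is run
 * from the kernel debugger as "show vmochk"; it reports internal
 * (handle-less default/swap) objects that have a zero reference count or
 * are not reachable from any process map or the kernel map.
 */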
/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags,
	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}
/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
        /* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}
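/*
 * DDB usage note (summary, not from this file): "show object <addr>"
 * invokes vm_object_print_static() above, and vm_object_print() is the
 * non-static wrapper that lets vm_map_print() emit the same per-object
 * report for the objects referenced by a map.
 */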
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	vm_pindex_t fidx;
	vm_paddr_t pa;
	vm_page_t m, prev_m;
	int rcount;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		db_printf("new object: %p\n", (void *)object);
		rcount = 0;
		fidx = 0;
		pa = -1;
		TAILQ_FOREACH(m, &object->memq, listq) {
			if (m->pindex > 128)
				break;
			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
			    prev_m->pindex + 1 != m->pindex) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					rcount = 0;
				}
			}
			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				    (long)fidx, rcount, (long)pa);
			}
			fidx = m->pindex;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
		}
	}
}