/*
 * Copyright (c) 1991, 1993, 2013
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/refcount.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <vm/vm_page2.h>

#include <machine/specialreg.h>

#define EASY_SCAN_FACTOR	8

static void vm_object_qcollapse(vm_object_t object,
				vm_object_t backing_object);
static void vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
					 int pagerflags);
static void vm_object_lock_init(vm_object_t);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 */
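
/*
 * Illustrative sketch (not compiled into the kernel): the object lifecycle
 * implied by the description above, using the allocation, reference and
 * deallocation entry points defined later in this file.  The OBJT_DEFAULT
 * type and the size of 16 pages are arbitrary example values.
 */
#if 0
static void
vm_object_lifecycle_example(void)
{
	vm_object_t obj;

	/* create a new anonymous object; returned with ref_count == 1 */
	obj = vm_object_allocate(OBJT_DEFAULT, 16);

	/* additional references require the object to be held */
	vm_object_hold(obj);
	vm_object_reference_locked(obj);
	vm_object_drop(obj);

	/* each reference is given up with a deallocate; the last one frees */
	vm_object_deallocate(obj);
	vm_object_deallocate(obj);
}
#endif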

struct vm_object kernel_object;

static long object_collapses;
static long object_bypasses;

struct vm_object_hash vm_object_hash[VMOBJ_HSIZE];

MALLOC_DEFINE(M_VM_OBJECT, "vm_object", "vm_object structures");

#define VMOBJ_HASH_PRIME1	66555444443333333ULL
#define VMOBJ_HASH_PRIME2	989042931893ULL

/*
 * Hash a vm_object pointer into one of the global hash buckets.
 */
struct vm_object_hash *
vmobj_hash(vm_object_t obj)
{
	uintptr_t hash1;
	uintptr_t hash2;

	hash1 = (uintptr_t)obj + ((uintptr_t)obj >> 18);
	hash1 %= VMOBJ_HASH_PRIME1;
	hash2 = ((uintptr_t)obj >> 8) + ((uintptr_t)obj >> 24);
	hash2 %= VMOBJ_HASH_PRIME2;
	return (&vm_object_hash[(hash1 ^ hash2) & VMOBJ_HMASK]);
}
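
/*
 * Illustrative, userland-compilable sketch (not part of the kernel build) of
 * the bucket computation above.  The EX_VMOBJ_HSIZE/EX_VMOBJ_HMASK values are
 * assumptions for this example only; the real ones come from vm_object.h.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_VMOBJ_HSIZE	64			/* example: must be a power of 2 */
#define EX_VMOBJ_HMASK	(EX_VMOBJ_HSIZE - 1)

static unsigned
example_vmobj_bucket(const void *obj)
{
	uintptr_t hash1;
	uintptr_t hash2;

	/* same mixing as vmobj_hash() above, on the pointer bits */
	hash1 = (uintptr_t)obj + ((uintptr_t)obj >> 18);
	hash1 %= 66555444443333333ULL;
	hash2 = ((uintptr_t)obj >> 8) + ((uintptr_t)obj >> 24);
	hash2 %= 989042931893ULL;
	return ((hash1 ^ hash2) & EX_VMOBJ_HMASK);
}

int
main(void)
{
	int dummy;

	printf("bucket=%u\n", example_vmobj_bucket(&dummy));
	return 0;
}
#endif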

#if defined(DEBUG_LOCKS)

#define vm_object_vndeallocate(obj, vpp)	\
		debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)
162 * Debug helper to track hold/drop/ref/deallocate calls.
165 debugvm_object_add(vm_object_t obj
, char *file
, int line
, int addrem
)
169 i
= atomic_fetchadd_int(&obj
->debug_index
, 1);
170 i
= i
& (VMOBJ_DEBUG_ARRAY_SIZE
- 1);
171 ksnprintf(obj
->debug_hold_thrs
[i
],
172 sizeof(obj
->debug_hold_thrs
[i
]),
174 (addrem
== -1 ? '-' : (addrem
== 1 ? '+' : '=')),
175 (curthread
->td_proc
? curthread
->td_proc
->p_pid
: -1),
	obj->debug_hold_file[i] = file;
	obj->debug_hold_line[i] = line;

#if 0
	/* Uncomment for debugging obj refs/derefs in reproducible cases */
	if (strcmp(curthread->td_comm, "sshd") == 0) {
		kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
			(curthread->td_proc ? curthread->td_proc->p_pid : -1),
			obj, obj->ref_count, addrem, file, line);
	}
#endif
}

/*
 * Misc low level routines
 */
static void
vm_object_lock_init(vm_object_t obj)
{
#if defined(DEBUG_LOCKS)
	int i;

	obj->debug_index = 0;
	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
		obj->debug_hold_thrs[i][0] = 0;
		obj->debug_hold_file[i] = NULL;
		obj->debug_hold_line[i] = 0;
	}
#endif
}

void
vm_object_lock_swap(void)
{
	lwkt_token_swap();
}

void
vm_object_lock(vm_object_t obj)
{
	lwkt_gettoken(&obj->token);
}

/*
 * Returns TRUE on success
 */
int
vm_object_lock_try(vm_object_t obj)
{
	return(lwkt_trytoken(&obj->token));
}

void
vm_object_lock_shared(vm_object_t obj)
{
	lwkt_gettoken_shared(&obj->token);
}

void
vm_object_unlock(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
}

void
vm_object_upgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken(&obj->token);
}

void
vm_object_downgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken_shared(&obj->token);
}

void
vm_object_assert_held(vm_object_t obj)
{
	ASSERT_LWKT_TOKEN_HELD(&obj->token);
}

static int
vm_quickcolor(void)
{
	globaldata_t gd = mycpu;
	int pg_color;

	pg_color = (int)(intptr_t)gd->gd_curthread >> 10;
	pg_color += gd->gd_quick_color;
	gd->gd_quick_color += PQ_PRIME2;

	return (pg_color);
}

void
VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to the
	 * caller's context, typically already holding the token on a
	 * parent object) prior to potentially blocking on the lock,
	 * otherwise the object can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock(obj);

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
}
296 VMOBJDEBUG(vm_object_hold_try
)(vm_object_t obj VMOBJDBARGS
)
298 KKASSERT(obj
!= NULL
);
301 * Object must be held (object allocation is stable due to callers
302 * context, typically already holding the token on a parent object)
303 * prior to potentially blocking on the lock, otherwise the object
304 * can get ripped away from us.
306 refcount_acquire(&obj
->hold_count
);
307 if (vm_object_lock_try(obj
) == 0) {
308 if (refcount_release(&obj
->hold_count
)) {
309 if (obj
->ref_count
== 0 && (obj
->flags
& OBJ_DEAD
))
310 kfree(obj
, M_VM_OBJECT
);
315 #if defined(DEBUG_LOCKS)
316 debugvm_object_add(obj
, file
, line
, 1);
322 VMOBJDEBUG(vm_object_hold_shared
)(vm_object_t obj VMOBJDBARGS
)
324 KKASSERT(obj
!= NULL
);
327 * Object must be held (object allocation is stable due to callers
328 * context, typically already holding the token on a parent object)
329 * prior to potentially blocking on the lock, otherwise the object
330 * can get ripped away from us.
332 refcount_acquire(&obj
->hold_count
);
333 vm_object_lock_shared(obj
);
335 #if defined(DEBUG_LOCKS)
336 debugvm_object_add(obj
, file
, line
, 1);

/*
 * Drop the token and hold_count on the object.
 *
 * WARNING! Token might be shared.
 */
346 VMOBJDEBUG(vm_object_drop
)(vm_object_t obj VMOBJDBARGS
)
352 * No new holders should be possible once we drop hold_count 1->0 as
353 * there is no longer any way to reference the object.
355 KKASSERT(obj
->hold_count
> 0);
356 if (refcount_release(&obj
->hold_count
)) {
357 #if defined(DEBUG_LOCKS)
358 debugvm_object_add(obj
, file
, line
, -1);
361 if (obj
->ref_count
== 0 && (obj
->flags
& OBJ_DEAD
)) {
362 vm_object_unlock(obj
);
363 kfree(obj
, M_VM_OBJECT
);
365 vm_object_unlock(obj
);
368 #if defined(DEBUG_LOCKS)
369 debugvm_object_add(obj
, file
, line
, -1);
371 vm_object_unlock(obj
);

/*
 * Initialize a freshly allocated object, returning a held object.
 *
 * Used only by vm_object_allocate(), zinitna() and vm_object_init().
 */
383 _vm_object_allocate(objtype_t type
, vm_pindex_t size
, vm_object_t object
)
385 struct vm_object_hash
*hash
;
387 RB_INIT(&object
->rb_memq
);
388 LIST_INIT(&object
->shadow_head
);
389 lwkt_token_init(&object
->token
, "vmobj");
393 object
->ref_count
= 1;
394 object
->memattr
= VM_MEMATTR_DEFAULT
;
395 object
->hold_count
= 0;
397 if ((object
->type
== OBJT_DEFAULT
) || (object
->type
== OBJT_SWAP
))
398 vm_object_set_flag(object
, OBJ_ONEMAPPING
);
399 object
->paging_in_progress
= 0;
400 object
->resident_page_count
= 0;
401 object
->shadow_count
= 0;
402 /* cpu localization twist */
403 object
->pg_color
= vm_quickcolor();
404 object
->handle
= NULL
;
405 object
->backing_object
= NULL
;
406 object
->backing_object_offset
= (vm_ooffset_t
)0;
408 atomic_add_int(&object
->generation
, 1);
409 object
->swblock_count
= 0;
410 RB_INIT(&object
->swblock_root
);
411 vm_object_lock_init(object
);
412 pmap_object_init(object
);
414 vm_object_hold(object
);
416 hash
= vmobj_hash(object
);
417 lwkt_gettoken(&hash
->token
);
418 TAILQ_INSERT_TAIL(&hash
->list
, object
, object_list
);
419 lwkt_reltoken(&hash
->token
);

/*
 * Initialize a VM object.
 */
426 vm_object_init(vm_object_t object
, vm_pindex_t size
)
428 _vm_object_allocate(OBJT_DEFAULT
, size
, object
);
429 vm_object_drop(object
);

/*
 * Initialize the VM objects module.
 *
 * Called from the low level boot code only.  Note that this occurs before
 * kmalloc is initialized so we cannot allocate any VM objects.
 */
439 vm_object_init1(void)
443 for (i
= 0; i
< VMOBJ_HSIZE
; ++i
) {
444 TAILQ_INIT(&vm_object_hash
[i
].list
);
445 lwkt_token_init(&vm_object_hash
[i
].token
, "vmobjlst");
448 _vm_object_allocate(OBJT_DEFAULT
, OFF_TO_IDX(KvaEnd
),
450 vm_object_drop(&kernel_object
);
454 vm_object_init2(void)
456 kmalloc_set_unlimited(M_VM_OBJECT
);

/*
 * Allocate and return a new object of the specified type and size.
 */
465 vm_object_allocate(objtype_t type
, vm_pindex_t size
)
469 obj
= kmalloc(sizeof(*obj
), M_VM_OBJECT
, M_INTWAIT
|M_ZERO
);
470 _vm_object_allocate(type
, size
, obj
);
477 * This version returns a held object, allowing further atomic initialization
481 vm_object_allocate_hold(objtype_t type
, vm_pindex_t size
)
485 obj
= kmalloc(sizeof(*obj
), M_VM_OBJECT
, M_INTWAIT
|M_ZERO
);
486 _vm_object_allocate(type
, size
, obj
);

/*
 * Add an additional reference to a vm_object.  The object must already be
 * held.  The original non-lock version is no longer supported.  The object
 * must NOT be chain locked by anyone at the time the reference is added.
 *
 * Referencing a chain-locked object can blow up the fairly sensitive
 * ref_count and shadow_count tests in the deallocator.  Most callers
 * will call vm_object_chain_wait() prior to calling
 * vm_object_reference_locked() to avoid the case.  The held token
 * allows the caller to pair the wait and ref.
 *
 * The object must be held, but may be held shared if desired (hence why
 * we use an atomic op).
 */
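
/*
 * Illustrative sketch (not compiled): the hold/chain-wait/reference pattern
 * described above.  Assumes the caller already controls some other reference
 * that keeps 'obj' from being destroyed while we block on the hold.
 */
#if 0
static void
vm_object_add_ref_example(vm_object_t obj)
{
	vm_object_hold(obj);		/* acquires the object token */
	vm_object_chain_wait(obj, 0);	/* wait out any chain lock first */
	vm_object_reference_locked(obj);
	vm_object_drop(obj);
}
#endif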
506 VMOBJDEBUG(vm_object_reference_locked
)(vm_object_t object VMOBJDBARGS
)
508 KKASSERT(object
!= NULL
);
509 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object
));
510 KKASSERT((object
->chainlk
& (CHAINLK_EXCL
| CHAINLK_MASK
)) == 0);
511 atomic_add_int(&object
->ref_count
, 1);
512 if (object
->type
== OBJT_VNODE
) {
513 vref(object
->handle
);
514 /* XXX what if the vnode is being destroyed? */
516 #if defined(DEBUG_LOCKS)
517 debugvm_object_add(object
, file
, line
, 1);

/*
 * This version explicitly allows the chain to be held (i.e. by the
 * caller).  The token must also be held.
 */
526 VMOBJDEBUG(vm_object_reference_locked_chain_held
)(vm_object_t object
529 KKASSERT(object
!= NULL
);
530 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object
));
531 atomic_add_int(&object
->ref_count
, 1);
532 if (object
->type
== OBJT_VNODE
) {
533 vref(object
->handle
);
534 /* XXX what if the vnode is being destroyed? */
536 #if defined(DEBUG_LOCKS)
537 debugvm_object_add(object
, file
, line
, 1);

/*
 * This version is only allowed for vnode objects.
 */
545 VMOBJDEBUG(vm_object_reference_quick
)(vm_object_t object VMOBJDBARGS
)
547 KKASSERT(object
->type
== OBJT_VNODE
);
548 atomic_add_int(&object
->ref_count
, 1);
549 vref(object
->handle
);
550 #if defined(DEBUG_LOCKS)
551 debugvm_object_add(object
, file
, line
, 1);

/*
 * Object OBJ_CHAINLOCK lock handling.
 *
 * The caller can chain-lock backing objects recursively and then
 * use vm_object_chain_release_all() to undo the whole chain.
 *
 * Chain locks are used to prevent collapses and are only applicable
 * to OBJT_DEFAULT and OBJT_SWAP objects.  Chain locking operations
 * on other object types are ignored.  This is also important because
 * it allows e.g. the vnode underlying a memory mapping to take concurrent
 * page faults.
 *
 * The object must usually be held on entry, though intermediate
 * objects need not be held on release.  The object must be held exclusively,
 * NOT shared.  Note that the prefault path checks the shared state and
 * avoids using the chain functions.
 */
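
/*
 * Illustrative sketch (not compiled): acquiring and releasing a chain lock
 * around a collapse attempt, per the rules above.  Assumes 'obj' is already
 * held exclusively by the caller and is an OBJT_DEFAULT or OBJT_SWAP object.
 */
#if 0
static void
vm_object_chainlock_example(vm_object_t obj)
{
	vm_object_chain_acquire(obj, 0);	/* 0 = not the shared path */
	/* ... operate on the backing chain, e.g. vm_object_collapse() ... */
	vm_object_chain_release(obj);
}
#endif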
573 vm_object_chain_wait(vm_object_t object
, int shared
)
575 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object
));
577 uint32_t chainlk
= object
->chainlk
;
581 if (chainlk
& (CHAINLK_EXCL
| CHAINLK_EXCLREQ
)) {
582 tsleep_interlock(object
, 0);
583 if (atomic_cmpset_int(&object
->chainlk
,
585 chainlk
| CHAINLK_WAIT
)) {
586 tsleep(object
, PINTERLOCKED
,
595 if (chainlk
& (CHAINLK_MASK
| CHAINLK_EXCL
)) {
596 tsleep_interlock(object
, 0);
597 if (atomic_cmpset_int(&object
->chainlk
,
599 chainlk
| CHAINLK_WAIT
))
601 tsleep(object
, PINTERLOCKED
,
606 if (atomic_cmpset_int(&object
->chainlk
,
608 chainlk
& ~CHAINLK_WAIT
))
610 if (chainlk
& CHAINLK_WAIT
)
622 vm_object_chain_acquire(vm_object_t object
, int shared
)
624 if (object
->type
!= OBJT_DEFAULT
&& object
->type
!= OBJT_SWAP
)
626 if (vm_shared_fault
== 0)
630 uint32_t chainlk
= object
->chainlk
;
634 if (chainlk
& (CHAINLK_EXCL
| CHAINLK_EXCLREQ
)) {
635 tsleep_interlock(object
, 0);
636 if (atomic_cmpset_int(&object
->chainlk
,
638 chainlk
| CHAINLK_WAIT
)) {
639 tsleep(object
, PINTERLOCKED
,
643 } else if (atomic_cmpset_int(&object
->chainlk
,
644 chainlk
, chainlk
+ 1)) {
649 if (chainlk
& (CHAINLK_MASK
| CHAINLK_EXCL
)) {
650 tsleep_interlock(object
, 0);
651 if (atomic_cmpset_int(&object
->chainlk
,
656 tsleep(object
, PINTERLOCKED
,
661 if (atomic_cmpset_int(&object
->chainlk
,
663 (chainlk
| CHAINLK_EXCL
) &
666 if (chainlk
& CHAINLK_WAIT
)
678 vm_object_chain_release(vm_object_t object
)
680 /*ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));*/
681 if (object
->type
!= OBJT_DEFAULT
&& object
->type
!= OBJT_SWAP
)
683 KKASSERT(object
->chainlk
& (CHAINLK_MASK
| CHAINLK_EXCL
));
685 uint32_t chainlk
= object
->chainlk
;
688 if (chainlk
& CHAINLK_MASK
) {
689 if ((chainlk
& CHAINLK_MASK
) == 1 &&
690 atomic_cmpset_int(&object
->chainlk
,
692 (chainlk
- 1) & ~CHAINLK_WAIT
)) {
693 if (chainlk
& CHAINLK_WAIT
)
697 if ((chainlk
& CHAINLK_MASK
) > 1 &&
698 atomic_cmpset_int(&object
->chainlk
,
699 chainlk
, chainlk
- 1)) {
704 KKASSERT(chainlk
& CHAINLK_EXCL
);
705 if (atomic_cmpset_int(&object
->chainlk
,
707 chainlk
& ~(CHAINLK_EXCL
|
709 if (chainlk
& CHAINLK_WAIT
)

/*
 * Release the chain from first_object through and including stopobj.
 * The caller is typically holding the first and last object locked
 * (shared or exclusive) to prevent destruction races.
 *
 * We release stopobj first as an optimization as this object is most
 * likely to be shared across multiple processes.
 */
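
/*
 * Illustrative sketch (not compiled): chain-locking down a backing chain and
 * then undoing the whole chain with vm_object_chain_release_all(), as the
 * comment above describes.  Holding/locking of the intermediate objects is
 * elided; this only shows the pairing of the two operations.
 */
#if 0
static void
vm_object_chain_example(vm_object_t first_object, vm_object_t stopobj)
{
	vm_object_t obj;

	for (obj = first_object; ; obj = obj->backing_object) {
		vm_object_chain_acquire(obj, 0);
		if (obj == stopobj)
			break;
	}
	/* ... do work across the chain ... */
	vm_object_chain_release_all(first_object, stopobj);
}
#endif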
726 vm_object_chain_release_all(vm_object_t first_object
, vm_object_t stopobj
)
728 vm_object_t backing_object
;
731 vm_object_chain_release(stopobj
);
732 object
= first_object
;
734 while (object
!= stopobj
) {
736 backing_object
= object
->backing_object
;
737 vm_object_chain_release(object
);
738 object
= backing_object
;

/*
 * Dereference an object and its underlying vnode.  The object may be
 * held shared.  On return the object will remain held.
 *
 * This function may return a vnode in *vpp which the caller must release
 * after the caller drops its own lock.  If vpp is NULL, we assume that
 * the caller was holding an exclusive lock on the object and we vrele()
 * the vp ourselves.
 */
752 VMOBJDEBUG(vm_object_vndeallocate
)(vm_object_t object
, struct vnode
**vpp
755 struct vnode
*vp
= (struct vnode
*) object
->handle
;
757 KASSERT(object
->type
== OBJT_VNODE
,
758 ("vm_object_vndeallocate: not a vnode object"));
759 KASSERT(vp
!= NULL
, ("vm_object_vndeallocate: missing vp"));
760 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object
));
762 if (object
->ref_count
== 0) {
763 vprint("vm_object_vndeallocate", vp
);
764 panic("vm_object_vndeallocate: bad object reference count");
768 int count
= object
->ref_count
;
771 vm_object_upgrade(object
);
772 if (atomic_cmpset_int(&object
->ref_count
, count
, 0)) {
773 vclrflags(vp
, VTEXT
);
777 if (atomic_cmpset_int(&object
->ref_count
,
784 #if defined(DEBUG_LOCKS)
785 debugvm_object_add(object
, file
, line
, -1);
789 * vrele or return the vp to vrele. We can only safely vrele(vp)
790 * if the object was locked exclusively. But there are two races
793 * We had to upgrade the object above to safely clear VTEXT
794 * but the alternative path where the shared lock is retained
795 * can STILL race to 0 in other paths and cause our own vrele()
796 * to terminate the vnode. We can't allow that if the VM object
797 * is still locked shared.

/*
 * Release a reference to the specified object, gained either through a
 * vm_object_allocate or a vm_object_reference call.  When all references
 * are gone, storage associated with this object may be relinquished.
 *
 * The caller does not have to hold the object locked but must have control
 * over the reference in question in order to guarantee that the object
 * does not get ripped out from under us.
 *
 * XXX Currently all deallocations require an exclusive lock.
 */
817 VMOBJDEBUG(vm_object_deallocate
)(vm_object_t object VMOBJDBARGS
)
826 count
= object
->ref_count
;
	/*
	 * If decrementing the count enters into special handling
	 * territory (0, 1, or 2) we have to do it the hard way.
	 * Fortunately, objects with only a few refs like this
	 * are not likely to be heavily contended anyway.
	 *
	 * For vnode objects we only care about 1->0 transitions.
	 */
837 if (count
<= 3 || (object
->type
== OBJT_VNODE
&& count
<= 1)) {
838 #if defined(DEBUG_LOCKS)
839 debugvm_object_add(object
, file
, line
, 0);
841 vm_object_hold(object
);
842 vm_object_deallocate_locked(object
);
843 vm_object_drop(object
);
	/*
	 * Try to decrement ref_count without acquiring a hold on
	 * the object.  This is particularly important for the exec*()
	 * and exit*() code paths because the program binary may
	 * have a great deal of sharing and an exclusive lock will
	 * crowbar performance in those circumstances.
	 */
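	/*
	 * Illustrative sketch (not compiled) of the lock-free fast path
	 * described above: only take the count from n to n-1 when it is
	 * still large enough to stay out of the special-case territory
	 * handled by the slow path.
	 */
#if 0
	for (;;) {
		count = object->ref_count;
		if (count <= 3)
			break;				/* fall into slow path */
		if (atomic_cmpset_int(&object->ref_count, count, count - 1))
			return;				/* fast path done */
	}
#endif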
854 if (object
->type
== OBJT_VNODE
) {
855 vp
= (struct vnode
*)object
->handle
;
856 if (atomic_cmpset_int(&object
->ref_count
,
858 #if defined(DEBUG_LOCKS)
859 debugvm_object_add(object
, file
, line
, -1);
867 if (atomic_cmpset_int(&object
->ref_count
,
869 #if defined(DEBUG_LOCKS)
870 debugvm_object_add(object
, file
, line
, -1);
881 VMOBJDEBUG(vm_object_deallocate_locked
)(vm_object_t object VMOBJDBARGS
)
883 struct vm_object_dealloc_list
*dlist
= NULL
;
884 struct vm_object_dealloc_list
*dtmp
;
889 * We may chain deallocate object, but additional objects may
890 * collect on the dlist which also have to be deallocated. We
891 * must avoid a recursion, vm_object chains can get deep.
895 while (object
!= NULL
) {
897 * vnode case, caller either locked the object exclusively
898 * or this is a recursion with must_drop != 0 and the vnode
899 * object will be locked shared.
901 * If locked shared we have to drop the object before we can
902 * call vrele() or risk a shared/exclusive livelock.
904 if (object
->type
== OBJT_VNODE
) {
905 ASSERT_LWKT_TOKEN_HELD(&object
->token
);
907 struct vnode
*tmp_vp
;
909 vm_object_vndeallocate(object
, &tmp_vp
);
910 vm_object_drop(object
);
915 vm_object_vndeallocate(object
, NULL
);
919 ASSERT_LWKT_TOKEN_HELD_EXCL(&object
->token
);
922 * Normal case (object is locked exclusively)
924 if (object
->ref_count
== 0) {
925 panic("vm_object_deallocate: object deallocated "
926 "too many times: %d", object
->type
);
928 if (object
->ref_count
> 2) {
929 atomic_add_int(&object
->ref_count
, -1);
930 #if defined(DEBUG_LOCKS)
931 debugvm_object_add(object
, file
, line
, -1);
937 * Here on ref_count of one or two, which are special cases for
940 * Nominal ref_count > 1 case if the second ref is not from
943 * (ONEMAPPING only applies to DEFAULT AND SWAP objects)
945 if (object
->ref_count
== 2 && object
->shadow_count
== 0) {
946 if (object
->type
== OBJT_DEFAULT
||
947 object
->type
== OBJT_SWAP
) {
948 vm_object_set_flag(object
, OBJ_ONEMAPPING
);
950 atomic_add_int(&object
->ref_count
, -1);
951 #if defined(DEBUG_LOCKS)
952 debugvm_object_add(object
, file
, line
, -1);
958 * If the second ref is from a shadow we chain along it
959 * upwards if object's handle is exhausted.
961 * We have to decrement object->ref_count before potentially
962 * collapsing the first shadow object or the collapse code
963 * will not be able to handle the degenerate case to remove
964 * object. However, if we do it too early the object can
965 * get ripped out from under us.
967 if (object
->ref_count
== 2 && object
->shadow_count
== 1 &&
968 object
->handle
== NULL
&& (object
->type
== OBJT_DEFAULT
||
969 object
->type
== OBJT_SWAP
)) {
970 temp
= LIST_FIRST(&object
->shadow_head
);
971 KKASSERT(temp
!= NULL
);
972 vm_object_hold(temp
);
975 * Wait for any paging to complete so the collapse
976 * doesn't (or isn't likely to) qcollapse. pip
977 * waiting must occur before we acquire the
981 temp
->paging_in_progress
||
982 object
->paging_in_progress
984 vm_object_pip_wait(temp
, "objde1");
985 vm_object_pip_wait(object
, "objde2");
989 * If the parent is locked we have to give up, as
990 * otherwise we would be acquiring locks in the
991 * wrong order and potentially deadlock.
993 if (temp
->chainlk
& (CHAINLK_EXCL
| CHAINLK_MASK
)) {
994 vm_object_drop(temp
);
997 vm_object_chain_acquire(temp
, 0);
1000 * Recheck/retry after the hold and the paging
1001 * wait, both of which can block us.
1003 if (object
->ref_count
!= 2 ||
1004 object
->shadow_count
!= 1 ||
1006 LIST_FIRST(&object
->shadow_head
) != temp
||
1007 (object
->type
!= OBJT_DEFAULT
&&
1008 object
->type
!= OBJT_SWAP
)) {
1009 vm_object_chain_release(temp
);
1010 vm_object_drop(temp
);
1015 * We can safely drop object's ref_count now.
1017 KKASSERT(object
->ref_count
== 2);
1018 atomic_add_int(&object
->ref_count
, -1);
1019 #if defined(DEBUG_LOCKS)
1020 debugvm_object_add(object
, file
, line
, -1);
		/*
		 * If our single parent is not collapsible just
		 * decrement ref_count (2->1) and stop.
		 */
1027 if (temp
->handle
|| (temp
->type
!= OBJT_DEFAULT
&&
1028 temp
->type
!= OBJT_SWAP
)) {
1029 vm_object_chain_release(temp
);
1030 vm_object_drop(temp
);
1035 * At this point we have already dropped object's
1036 * ref_count so it is possible for a race to
1037 * deallocate obj out from under us. Any collapse
1038 * will re-check the situation. We must not block
1039 * until we are able to collapse.
1041 * Bump temp's ref_count to avoid an unwanted
1042 * degenerate recursion (can't call
1043 * vm_object_reference_locked() because it asserts
1044 * that CHAINLOCK is not set).
1046 atomic_add_int(&temp
->ref_count
, 1);
1047 KKASSERT(temp
->ref_count
> 1);
1050 * Collapse temp, then deallocate the extra ref
1053 vm_object_collapse(temp
, &dlist
);
1054 vm_object_chain_release(temp
);
1056 vm_object_lock_swap();
1057 vm_object_drop(object
);
1065 * Drop the ref and handle termination on the 1->0 transition.
1066 * We may have blocked above so we have to recheck.
1069 KKASSERT(object
->ref_count
!= 0);
1070 if (object
->ref_count
>= 2) {
1071 atomic_add_int(&object
->ref_count
, -1);
1072 #if defined(DEBUG_LOCKS)
1073 debugvm_object_add(object
, file
, line
, -1);
1077 KKASSERT(object
->ref_count
== 1);
1080 * 1->0 transition. Chain through the backing_object.
1081 * Maintain the ref until we've located the backing object,
1084 while ((temp
= object
->backing_object
) != NULL
) {
1085 if (temp
->type
== OBJT_VNODE
)
1086 vm_object_hold_shared(temp
);
1088 vm_object_hold(temp
);
1089 if (temp
== object
->backing_object
)
1091 vm_object_drop(temp
);
1095 * 1->0 transition verified, retry if ref_count is no longer
1096 * 1. Otherwise disconnect the backing_object (temp) and
1099 if (object
->ref_count
!= 1) {
1100 vm_object_drop(temp
);
1105 * It shouldn't be possible for the object to be chain locked
1106 * if we're removing the last ref on it.
1108 * Removing object from temp's shadow list requires dropping
1109 * temp, which we will do on loop.
1111 * NOTE! vnodes do not use the shadow list, but still have
1112 * the backing_object reference.
1114 KKASSERT((object
->chainlk
& (CHAINLK_EXCL
|CHAINLK_MASK
)) == 0);
1117 if (object
->flags
& OBJ_ONSHADOW
) {
1118 LIST_REMOVE(object
, shadow_list
);
1119 temp
->shadow_count
--;
1120 atomic_add_int(&temp
->generation
, 1);
1121 vm_object_clear_flag(object
, OBJ_ONSHADOW
);
1123 object
->backing_object
= NULL
;
1126 atomic_add_int(&object
->ref_count
, -1);
1127 if ((object
->flags
& OBJ_DEAD
) == 0)
1128 vm_object_terminate(object
);
1129 if (must_drop
&& temp
)
1130 vm_object_lock_swap();
1132 vm_object_drop(object
);
1137 if (must_drop
&& object
)
1138 vm_object_drop(object
);
1141 * Additional tail recursion on dlist. Avoid a recursion. Objects
1142 * on the dlist have a hold count but are not locked.
1144 if ((dtmp
= dlist
) != NULL
) {
1146 object
= dtmp
->object
;
1147 kfree(dtmp
, M_TEMP
);
1149 vm_object_lock(object
); /* already held, add lock */
1150 must_drop
= 1; /* and we're responsible for it */

/*
 * Destroy the specified object, freeing up related resources.
 *
 * The object must have zero references.
 *
 * The object must be held.  The caller is responsible for dropping the
 * object after terminate returns.  Terminate does NOT drop the object.
 */
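
/*
 * Illustrative sketch (not compiled): the responsibility split described
 * above, as it would appear in a deallocation path.  Termination happens
 * with the object held and zero references; the final vm_object_drop() is
 * what actually allows the memory to be freed.
 */
#if 0
	vm_object_hold(object);
	if (object->ref_count == 0 && (object->flags & OBJ_DEAD) == 0)
		vm_object_terminate(object);
	vm_object_drop(object);
#endif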
1163 static int vm_object_terminate_callback(vm_page_t p
, void *data
);
1166 vm_object_terminate(vm_object_t object
)
1168 struct rb_vm_page_scan_info info
;
1169 struct vm_object_hash
*hash
;
1172 * Make sure no one uses us. Once we set OBJ_DEAD we should be
1173 * able to safely block.
1175 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object
));
1176 KKASSERT((object
->flags
& OBJ_DEAD
) == 0);
1177 vm_object_set_flag(object
, OBJ_DEAD
);
1180 * Wait for the pageout daemon to be done with the object
1182 vm_object_pip_wait(object
, "objtrm1");
1184 KASSERT(!object
->paging_in_progress
,
1185 ("vm_object_terminate: pageout in progress"));
1188 * Clean and free the pages, as appropriate. All references to the
1189 * object are gone, so we don't need to lock it.
1191 if (object
->type
== OBJT_VNODE
) {
1195 * Clean pages and flush buffers.
1197 * NOTE! TMPFS buffer flushes do not typically flush the
1198 * actual page to swap as this would be highly
1199 * inefficient, and normal filesystems usually wrap
1200 * page flushes with buffer cache buffers.
1202 * To deal with this we have to call vinvalbuf() both
1203 * before and after the vm_object_page_clean().
1205 vp
= (struct vnode
*) object
->handle
;
1206 vinvalbuf(vp
, V_SAVE
, 0, 0);
1207 vm_object_page_clean(object
, 0, 0, OBJPC_SYNC
);
1208 vinvalbuf(vp
, V_SAVE
, 0, 0);
1212 * Wait for any I/O to complete, after which there had better not
1213 * be any references left on the object.
1215 vm_object_pip_wait(object
, "objtrm2");
1217 if (object
->ref_count
!= 0) {
1218 panic("vm_object_terminate: object with references, "
1219 "ref_count=%d", object
->ref_count
);
1223 * Cleanup any shared pmaps associated with this object.
1225 pmap_object_free(object
);
1228 * Now free any remaining pages. For internal objects, this also
1229 * removes them from paging queues. Don't free wired pages, just
1230 * remove them from the object.
1233 info
.object
= object
;
1236 vm_page_rb_tree_RB_SCAN(&object
->rb_memq
, NULL
,
1237 vm_object_terminate_callback
, &info
);
1238 } while (info
.error
);
1241 * Let the pager know object is dead.
1243 vm_pager_deallocate(object
);
1246 * Wait for the object hold count to hit 1, clean out pages as
1247 * we go. vmobj_token interlocks any race conditions that might
1248 * pick the object up from the vm_object_list after we have cleared
1252 if (RB_ROOT(&object
->rb_memq
) == NULL
)
1254 kprintf("vm_object_terminate: Warning, object %p "
1255 "still has %ld pages\n",
1256 object
, object
->resident_page_count
);
1257 vm_page_rb_tree_RB_SCAN(&object
->rb_memq
, NULL
,
1258 vm_object_terminate_callback
, &info
);
1262 * There had better not be any pages left
1264 KKASSERT(object
->resident_page_count
== 0);
1267 * Remove the object from the global object list.
1269 hash
= vmobj_hash(object
);
1270 lwkt_gettoken(&hash
->token
);
1271 TAILQ_REMOVE(&hash
->list
, object
, object_list
);
1272 lwkt_reltoken(&hash
->token
);
1274 if (object
->ref_count
!= 0) {
1275 panic("vm_object_terminate2: object with references, "
1276 "ref_count=%d", object
->ref_count
);
1280 * NOTE: The object hold_count is at least 1, so we cannot kfree()
1281 * the object here. See vm_object_drop().
1286 * The caller must hold the object.
1289 vm_object_terminate_callback(vm_page_t p
, void *data
)
1291 struct rb_vm_page_scan_info
*info
= data
;
1295 KKASSERT(object
== info
->object
);
1296 if (vm_page_busy_try(p
, TRUE
)) {
1297 vm_page_sleep_busy(p
, TRUE
, "vmotrm");
1301 if (object
!= p
->object
) {
1302 /* XXX remove once we determine it can't happen */
1303 kprintf("vm_object_terminate: Warning: Encountered "
1304 "busied page %p on queue %d\n", p
, p
->queue
);
1307 } else if (p
->wire_count
== 0) {
1309 * NOTE: p->dirty and PG_NEED_COMMIT are ignored.
1312 mycpu
->gd_cnt
.v_pfree
++;
1314 if (p
->queue
!= PQ_NONE
)
1315 kprintf("vm_object_terminate: Warning: Encountered "
1316 "wired page %p on queue %d\n", p
, p
->queue
);
1322 * Must be at end to avoid SMP races, caller holds object token
1324 if ((++info
->count
& 63) == 0)

/*
 * Clean all dirty pages in the specified range of object.  Leaves page
 * on whatever queue it is currently on.   If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * When stuffing pages asynchronously, allow clustering.  XXX we need a
 * synchronous clustering mode implementation.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked?  XXX
 */
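
/*
 * Illustrative sketch (not compiled): a typical synchronous call, as used by
 * vm_object_terminate() earlier in this file for vnode objects.  Passing
 * start == end == 0 means "clean the entire object".
 */
#if 0
	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
#endif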
1342 static int vm_object_page_clean_pass1(struct vm_page
*p
, void *data
);
1343 static int vm_object_page_clean_pass2(struct vm_page
*p
, void *data
);
1346 vm_object_page_clean(vm_object_t object
, vm_pindex_t start
, vm_pindex_t end
,
1349 struct rb_vm_page_scan_info info
;
1355 vm_object_hold(object
);
1356 if (object
->type
!= OBJT_VNODE
||
1357 (object
->flags
& OBJ_MIGHTBEDIRTY
) == 0) {
1358 vm_object_drop(object
);
1362 pagerflags
= (flags
& (OBJPC_SYNC
| OBJPC_INVAL
)) ?
1363 VM_PAGER_PUT_SYNC
: VM_PAGER_CLUSTER_OK
;
1364 pagerflags
|= (flags
& OBJPC_INVAL
) ? VM_PAGER_PUT_INVAL
: 0;
1366 vp
= object
->handle
;
1369 * Interlock other major object operations. This allows us to
1370 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
1372 vm_object_set_flag(object
, OBJ_CLEANING
);
1375 * Handle 'entire object' case
1377 info
.start_pindex
= start
;
1379 info
.end_pindex
= object
->size
- 1;
1381 info
.end_pindex
= end
- 1;
1383 wholescan
= (start
== 0 && info
.end_pindex
== object
->size
- 1);
1385 info
.pagerflags
= pagerflags
;
1386 info
.object
= object
;
1389 * If cleaning the entire object do a pass to mark the pages read-only.
1390 * If everything worked out ok, clear OBJ_WRITEABLE and
1396 vm_page_rb_tree_RB_SCAN(&object
->rb_memq
, rb_vm_page_scancmp
,
1397 vm_object_page_clean_pass1
, &info
);
1398 if (info
.error
== 0) {
1399 vm_object_clear_flag(object
,
1400 OBJ_WRITEABLE
|OBJ_MIGHTBEDIRTY
);
1401 if (object
->type
== OBJT_VNODE
&&
1402 (vp
= (struct vnode
*)object
->handle
) != NULL
) {
1404 * Use new-style interface to clear VISDIRTY
1405 * because the vnode is not necessarily removed
1406 * from the syncer list(s) as often as it was
1407 * under the old interface, which can leave
1408 * the vnode on the syncer list after reclaim.
1416 * Do a pass to clean all the dirty pages we find.
1421 generation
= object
->generation
;
1422 vm_page_rb_tree_RB_SCAN(&object
->rb_memq
, rb_vm_page_scancmp
,
1423 vm_object_page_clean_pass2
, &info
);
1424 } while (info
.error
|| generation
!= object
->generation
);
1426 vm_object_clear_flag(object
, OBJ_CLEANING
);
1427 vm_object_drop(object
);
1431 * The caller must hold the object.
1435 vm_object_page_clean_pass1(struct vm_page
*p
, void *data
)
1437 struct rb_vm_page_scan_info
*info
= data
;
1439 KKASSERT(p
->object
== info
->object
);
1441 vm_page_flag_set(p
, PG_CLEANCHK
);
1442 if ((info
->limit
& OBJPC_NOSYNC
) && (p
->flags
& PG_NOSYNC
)) {
1444 } else if (vm_page_busy_try(p
, FALSE
)) {
1447 KKASSERT(p
->object
== info
->object
);
1448 vm_page_protect(p
, VM_PROT_READ
);
1453 * Must be at end to avoid SMP races, caller holds object token
1455 if ((++info
->count
& 63) == 0)
1461 * The caller must hold the object
1465 vm_object_page_clean_pass2(struct vm_page
*p
, void *data
)
1467 struct rb_vm_page_scan_info
*info
= data
;
1470 KKASSERT(p
->object
== info
->object
);
1473 * Do not mess with pages that were inserted after we started
1474 * the cleaning pass.
1476 if ((p
->flags
& PG_CLEANCHK
) == 0)
1479 generation
= info
->object
->generation
;
1481 if (vm_page_busy_try(p
, TRUE
)) {
1482 vm_page_sleep_busy(p
, TRUE
, "vpcwai");
1487 KKASSERT(p
->object
== info
->object
&&
1488 info
->object
->generation
== generation
);
1491 * Before wasting time traversing the pmaps, check for trivial
1492 * cases where the page cannot be dirty.
1494 if (p
->valid
== 0 || (p
->queue
- p
->pc
) == PQ_CACHE
) {
1495 KKASSERT((p
->dirty
& p
->valid
) == 0 &&
1496 (p
->flags
& PG_NEED_COMMIT
) == 0);
1502 * Check whether the page is dirty or not. The page has been set
1503 * to be read-only so the check will not race a user dirtying the
1506 vm_page_test_dirty(p
);
1507 if ((p
->dirty
& p
->valid
) == 0 && (p
->flags
& PG_NEED_COMMIT
) == 0) {
1508 vm_page_flag_clear(p
, PG_CLEANCHK
);
1514 * If we have been asked to skip nosync pages and this is a
1515 * nosync page, skip it. Note that the object flags were
1516 * not cleared in this case (because pass1 will have returned an
1517 * error), so we do not have to set them.
1519 if ((info
->limit
& OBJPC_NOSYNC
) && (p
->flags
& PG_NOSYNC
)) {
1520 vm_page_flag_clear(p
, PG_CLEANCHK
);
1526 * Flush as many pages as we can. PG_CLEANCHK will be cleared on
1527 * the pages that get successfully flushed. Set info->error if
1528 * we raced an object modification.
1530 vm_object_page_collect_flush(info
->object
, p
, info
->pagerflags
);
1531 /* vm_wait_nominal(); this can deadlock the system in syncer/pageout */
1534 * Must be at end to avoid SMP races, caller holds object token
1537 if ((++info
->count
& 63) == 0)

/*
 * Collect the specified page and nearby pages and flush them out.
 * The number of pages flushed is returned.  The passed page is busied
 * by the caller and we are responsible for its disposition.
 *
 * The caller must hold the object.
 */
1550 vm_object_page_collect_flush(vm_object_t object
, vm_page_t p
, int pagerflags
)
1558 vm_page_t ma
[BLIST_MAX_ALLOC
];
1560 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object
));
1563 page_base
= pi
% BLIST_MAX_ALLOC
;
1571 tp
= vm_page_lookup_busy_try(object
, pi
- page_base
+ ib
,
1577 if ((pagerflags
& VM_PAGER_IGNORE_CLEANCHK
) == 0 &&
1578 (tp
->flags
& PG_CLEANCHK
) == 0) {
1582 if ((tp
->queue
- tp
->pc
) == PQ_CACHE
) {
1583 vm_page_flag_clear(tp
, PG_CLEANCHK
);
1587 vm_page_test_dirty(tp
);
1588 if ((tp
->dirty
& tp
->valid
) == 0 &&
1589 (tp
->flags
& PG_NEED_COMMIT
) == 0) {
1590 vm_page_flag_clear(tp
, PG_CLEANCHK
);
1599 while (is
< BLIST_MAX_ALLOC
&&
1600 pi
- page_base
+ is
< object
->size
) {
1603 tp
= vm_page_lookup_busy_try(object
, pi
- page_base
+ is
,
1609 if ((pagerflags
& VM_PAGER_IGNORE_CLEANCHK
) == 0 &&
1610 (tp
->flags
& PG_CLEANCHK
) == 0) {
1614 if ((tp
->queue
- tp
->pc
) == PQ_CACHE
) {
1615 vm_page_flag_clear(tp
, PG_CLEANCHK
);
1619 vm_page_test_dirty(tp
);
1620 if ((tp
->dirty
& tp
->valid
) == 0 &&
1621 (tp
->flags
& PG_NEED_COMMIT
) == 0) {
1622 vm_page_flag_clear(tp
, PG_CLEANCHK
);
1631 * All pages in the ma[] array are busied now
1633 for (i
= ib
; i
< is
; ++i
) {
1634 vm_page_flag_clear(ma
[i
], PG_CLEANCHK
);
1635 vm_page_hold(ma
[i
]); /* XXX need this any more? */
1637 vm_pageout_flush(&ma
[ib
], is
- ib
, pagerflags
);
1638 for (i
= ib
; i
< is
; ++i
) /* XXX need this any more? */
1639 vm_page_unhold(ma
[i
]);

/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 *	 vm_page_protect will have no effect.
 */
1654 vm_object_pmap_copy_1(vm_object_t object
, vm_pindex_t start
, vm_pindex_t end
)
1659 if (object
== NULL
|| (object
->flags
& OBJ_WRITEABLE
) == 0)
1662 vm_object_hold(object
);
1663 for (idx
= start
; idx
< end
; idx
++) {
1664 p
= vm_page_lookup(object
, idx
);
1667 vm_page_protect(p
, VM_PROT_READ
);
1669 vm_object_drop(object
);

/*
 * Removes all physical pages in the specified object range from all
 * physical maps.
 *
 * The object must *not* be locked.
 */
1679 static int vm_object_pmap_remove_callback(vm_page_t p
, void *data
);
1682 vm_object_pmap_remove(vm_object_t object
, vm_pindex_t start
, vm_pindex_t end
)
1684 struct rb_vm_page_scan_info info
;
1690 info
.start_pindex
= start
;
1691 info
.end_pindex
= end
- 1;
1693 info
.object
= object
;
1695 vm_object_hold(object
);
1698 vm_page_rb_tree_RB_SCAN(&object
->rb_memq
, rb_vm_page_scancmp
,
1699 vm_object_pmap_remove_callback
, &info
);
1700 } while (info
.error
);
1701 if (start
== 0 && end
== object
->size
)
1702 vm_object_clear_flag(object
, OBJ_WRITEABLE
);
1703 vm_object_drop(object
);
1707 * The caller must hold the object
1710 vm_object_pmap_remove_callback(vm_page_t p
, void *data
)
1712 struct rb_vm_page_scan_info
*info
= data
;
1714 if (info
->object
!= p
->object
||
1715 p
->pindex
< info
->start_pindex
||
1716 p
->pindex
> info
->end_pindex
) {
1717 kprintf("vm_object_pmap_remove_callback: obj/pg race %p/%p\n",
1723 vm_page_protect(p
, VM_PROT_NONE
);
1726 * Must be at end to avoid SMP races, caller holds object token
1728 if ((++info
->count
& 63) == 0)

/*
 * Implements the madvise function at the object/page level.
 *
 * MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
 *
 * MADV_DONTNEED	(any object)
 *
 *	Deactivate the specified pages if they are resident.
 *
 * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
 *
 *	Deactivate and clean the specified pages if they are
 *	resident.  This permits the process to reuse the pages
 *	without faulting or the kernel to reclaim the pages
 *	without I/O.
 */
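
/*
 * Illustrative sketch (not compiled): the per-page action implied by each
 * advice value above, reduced to its core calls.  The page lookup, busying
 * and backing-object traversal performed by vm_object_madvise() are omitted
 * here; 'm', 'tobject' and 'tpindex' stand for the page being operated on,
 * the object it was found in, and its index in that object.
 */
#if 0
	switch (advise) {
	case MADV_WILLNEED:
		vm_page_activate(m);
		break;
	case MADV_DONTNEED:
		vm_page_dontneed(m);
		break;
	case MADV_FREE:
		pmap_clear_modify(m);		/* mark clean, keep mapped */
		vm_page_dontneed(m);
		if (tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
		break;
	}
#endif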
1754 vm_object_madvise(vm_object_t object
, vm_pindex_t pindex
,
1755 vm_pindex_t count
, int advise
)
1757 vm_pindex_t end
, tpindex
;
1758 vm_object_t tobject
;
1766 end
= pindex
+ count
;
1768 vm_object_hold(object
);
1772 * Locate and adjust resident pages
1774 for (; pindex
< end
; pindex
+= 1) {
1776 if (tobject
!= object
)
1777 vm_object_drop(tobject
);
1782 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1783 * and those pages must be OBJ_ONEMAPPING.
1785 if (advise
== MADV_FREE
) {
1786 if ((tobject
->type
!= OBJT_DEFAULT
&&
1787 tobject
->type
!= OBJT_SWAP
) ||
1788 (tobject
->flags
& OBJ_ONEMAPPING
) == 0) {
1793 m
= vm_page_lookup_busy_try(tobject
, tpindex
, TRUE
, &error
);
1796 vm_page_sleep_busy(m
, TRUE
, "madvpo");
1801 * There may be swap even if there is no backing page
1803 if (advise
== MADV_FREE
&& tobject
->type
== OBJT_SWAP
)
1804 swap_pager_freespace(tobject
, tpindex
, 1);
1809 while ((xobj
= tobject
->backing_object
) != NULL
) {
1810 KKASSERT(xobj
!= object
);
1811 vm_object_hold(xobj
);
1812 if (xobj
== tobject
->backing_object
)
1814 vm_object_drop(xobj
);
1818 tpindex
+= OFF_TO_IDX(tobject
->backing_object_offset
);
1819 if (tobject
!= object
) {
1820 vm_object_lock_swap();
1821 vm_object_drop(tobject
);
1828 * If the page is not in a normal active state, we skip it.
1829 * If the page is not managed there are no page queues to
1830 * mess with. Things can break if we mess with pages in
1831 * any of the below states.
1833 if (m
->wire_count
||
1834 (m
->flags
& (PG_UNMANAGED
| PG_NEED_COMMIT
)) ||
1835 m
->valid
!= VM_PAGE_BITS_ALL
1842 * Theoretically once a page is known not to be busy, an
1843 * interrupt cannot come along and rip it out from under us.
1846 if (advise
== MADV_WILLNEED
) {
1847 vm_page_activate(m
);
1848 } else if (advise
== MADV_DONTNEED
) {
1849 vm_page_dontneed(m
);
1850 } else if (advise
== MADV_FREE
) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of
			 * unmapping it.
			 */
1866 pmap_clear_modify(m
);
1869 vm_page_dontneed(m
);
1870 if (tobject
->type
== OBJT_SWAP
)
1871 swap_pager_freespace(tobject
, tpindex
, 1);
1875 if (tobject
!= object
)
1876 vm_object_drop(tobject
);
1877 vm_object_drop(object
);

/*
 * Create a new object which is backed by the specified existing object
 * range.  Replace the pointer and offset that was pointing at the existing
 * object with the pointer/offset for the new object.
 *
 * If addref is non-zero the returned object is given an additional reference.
 * This mechanic exists to avoid the situation where refs might be 1 and
 * race against a collapse when the caller intends to bump it.  So the
 * caller cannot add the ref after the fact.  Used when the caller is
 * duplicating a vm_map_entry.
 *
 * No other requirements.
 */
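
/*
 * Illustrative sketch (not compiled): how a caller duplicating a mapping
 * might shadow the object it currently points at.  'entry_object',
 * 'entry_offset' and 'length' are stand-ins for the caller's map-entry
 * fields, and the trailing addref/useshadowlist arguments shown here are
 * assumptions for the example; see the function body below for how
 * OBJT_VNODE sources skip the shadow list.
 */
#if 0
	vm_object_t obj = entry_object;		/* caller's current object */
	vm_ooffset_t off = entry_offset;	/* caller's current offset */

	vm_object_shadow(&obj, &off, length, 1, 1);
	/* obj/off now name the new shadow object backed by the old one */
#endif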
1894 vm_object_shadow(vm_object_t
*objectp
, vm_ooffset_t
*offset
, vm_size_t length
,
1904 * Don't create the new object if the old object isn't shared.
1905 * We have to chain wait before adding the reference to avoid
1906 * racing a collapse or deallocation.
1908 * Clear OBJ_ONEMAPPING flag when shadowing.
1910 * The caller owns a ref on source via *objectp which we are going
1911 * to replace. This ref is inherited by the backing_object assignment.
1912 * from nobject and does not need to be incremented here.
1914 * However, we add a temporary extra reference to the original source
1915 * prior to holding nobject in case we block, to avoid races where
1916 * someone else might believe that the source can be collapsed.
1920 if (source
->type
!= OBJT_VNODE
) {
1922 vm_object_hold(source
);
1923 vm_object_chain_wait(source
, 0);
1924 if (source
->ref_count
== 1 &&
1925 source
->handle
== NULL
&&
1926 (source
->type
== OBJT_DEFAULT
||
1927 source
->type
== OBJT_SWAP
)) {
1929 vm_object_reference_locked(source
);
1930 vm_object_clear_flag(source
,
1933 vm_object_drop(source
);
1936 vm_object_reference_locked(source
);
1937 vm_object_clear_flag(source
, OBJ_ONEMAPPING
);
1939 vm_object_reference_quick(source
);
1940 vm_object_clear_flag(source
, OBJ_ONEMAPPING
);
1945 * Allocate a new object with the given length. The new object
1946 * is returned referenced but we may have to add another one.
1947 * If we are adding a second reference we must clear OBJ_ONEMAPPING.
1948 * (typically because the caller is about to clone a vm_map_entry).
1950 * The source object currently has an extra reference to prevent
1951 * collapses into it while we mess with its shadow list, which
1952 * we will remove later in this routine.
1954 * The target object may require a second reference if asked for one
1957 result
= vm_object_allocate(OBJT_DEFAULT
, length
);
1959 panic("vm_object_shadow: no object for shadowing");
1960 vm_object_hold(result
);
1962 vm_object_reference_locked(result
);
1963 vm_object_clear_flag(result
, OBJ_ONEMAPPING
);
1967 * The new object shadows the source object. Chain wait before
1968 * adjusting shadow_count or the shadow list to avoid races.
1970 * Try to optimize the result object's page color when shadowing
1971 * in order to maintain page coloring consistency in the combined
1974 * The backing_object reference to source requires adding a ref to
1975 * source. We simply inherit the ref from the original *objectp
1976 * (which we are replacing) so no additional refs need to be added.
1977 * (we must still clean up the extra ref we had to prevent collapse
1980 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
1982 KKASSERT(result
->backing_object
== NULL
);
1983 result
->backing_object
= source
;
1985 if (useshadowlist
) {
1986 vm_object_chain_wait(source
, 0);
1987 LIST_INSERT_HEAD(&source
->shadow_head
,
1988 result
, shadow_list
);
1989 source
->shadow_count
++;
1990 atomic_add_int(&source
->generation
, 1);
1991 vm_object_set_flag(result
, OBJ_ONSHADOW
);
1993 /* cpu localization twist */
1994 result
->pg_color
= vm_quickcolor();
1998 * Adjust the return storage. Drop the ref on source before
2001 result
->backing_object_offset
= *offset
;
2002 vm_object_drop(result
);
2005 if (useshadowlist
) {
2006 vm_object_deallocate_locked(source
);
2007 vm_object_drop(source
);
2009 vm_object_deallocate(source
);
2014 * Return the new things
2019 #define OBSC_TEST_ALL_SHADOWED 0x0001
2020 #define OBSC_COLLAPSE_NOWAIT 0x0002
2021 #define OBSC_COLLAPSE_WAIT 0x0004
2023 static int vm_object_backing_scan_callback(vm_page_t p
, void *data
);
2026 * The caller must hold the object.
2029 vm_object_backing_scan(vm_object_t object
, vm_object_t backing_object
, int op
)
2031 struct rb_vm_page_scan_info info
;
2032 struct vm_object_hash
*hash
;
2034 vm_object_assert_held(object
);
2035 vm_object_assert_held(backing_object
);
2037 KKASSERT(backing_object
== object
->backing_object
);
2038 info
.backing_offset_index
= OFF_TO_IDX(object
->backing_object_offset
);
2041 * Initial conditions
2043 if (op
& OBSC_TEST_ALL_SHADOWED
) {
2045 * We do not want to have to test for the existence of
2046 * swap pages in the backing object. XXX but with the
2047 * new swapper this would be pretty easy to do.
2049 * XXX what about anonymous MAP_SHARED memory that hasn't
2050 * been ZFOD faulted yet? If we do not test for this, the
2051 * shadow test may succeed! XXX
2053 if (backing_object
->type
!= OBJT_DEFAULT
)
2056 if (op
& OBSC_COLLAPSE_WAIT
) {
2057 KKASSERT((backing_object
->flags
& OBJ_DEAD
) == 0);
2058 vm_object_set_flag(backing_object
, OBJ_DEAD
);
2060 hash
= vmobj_hash(backing_object
);
2061 lwkt_gettoken(&hash
->token
);
2062 TAILQ_REMOVE(&hash
->list
, backing_object
, object_list
);
2063 lwkt_reltoken(&hash
->token
);
2067 * Our scan. We have to retry if a negative error code is returned,
2068 * otherwise 0 or 1 will be returned in info.error. 0 Indicates that
2069 * the scan had to be stopped because the parent does not completely
2072 info
.object
= object
;
2073 info
.backing_object
= backing_object
;
2078 vm_page_rb_tree_RB_SCAN(&backing_object
->rb_memq
, NULL
,
2079 vm_object_backing_scan_callback
,
2081 } while (info
.error
< 0);
2087 * The caller must hold the object.
2090 vm_object_backing_scan_callback(vm_page_t p
, void *data
)
2092 struct rb_vm_page_scan_info
*info
= data
;
2093 vm_object_t backing_object
;
2096 vm_pindex_t new_pindex
;
2097 vm_pindex_t backing_offset_index
;
2101 new_pindex
= pindex
- info
->backing_offset_index
;
2103 object
= info
->object
;
2104 backing_object
= info
->backing_object
;
2105 backing_offset_index
= info
->backing_offset_index
;
2107 if (op
& OBSC_TEST_ALL_SHADOWED
) {
2111 * Ignore pages outside the parent object's range
2112 * and outside the parent object's mapping of the
2115 * note that we do not busy the backing object's
2118 if (pindex
< backing_offset_index
||
2119 new_pindex
>= object
->size
2125 * See if the parent has the page or if the parent's
2126 * object pager has the page. If the parent has the
2127 * page but the page is not valid, the parent's
2128 * object pager must have the page.
2130 * If this fails, the parent does not completely shadow
2131 * the object and we might as well give up now.
2133 pp
= vm_page_lookup(object
, new_pindex
);
2134 if ((pp
== NULL
|| pp
->valid
== 0) &&
2135 !vm_pager_has_page(object
, new_pindex
)
2137 info
->error
= 0; /* problemo */
2138 return(-1); /* stop the scan */
2143 * Check for busy page. Note that we may have lost (p) when we
2144 * possibly blocked above.
2146 if (op
& (OBSC_COLLAPSE_WAIT
| OBSC_COLLAPSE_NOWAIT
)) {
2149 if (vm_page_busy_try(p
, TRUE
)) {
2150 if (op
& OBSC_COLLAPSE_NOWAIT
) {
2154 * If we slept, anything could have
2155 * happened. Ask that the scan be restarted.
2157 * Since the object is marked dead, the
2158 * backing offset should not have changed.
2160 vm_page_sleep_busy(p
, TRUE
, "vmocol");
2167 * If (p) is no longer valid restart the scan.
2169 if (p
->object
!= backing_object
|| p
->pindex
!= pindex
) {
2170 kprintf("vm_object_backing_scan: Warning: page "
2171 "%p ripped out from under us\n", p
);
2177 if (op
& OBSC_COLLAPSE_NOWAIT
) {
2178 if (p
->valid
== 0 ||
2180 (p
->flags
& PG_NEED_COMMIT
)) {
2185 /* XXX what if p->valid == 0 , hold_count, etc? */
2189 p
->object
== backing_object
,
2190 ("vm_object_qcollapse(): object mismatch")
2194 * Destroy any associated swap
2196 if (backing_object
->type
== OBJT_SWAP
)
2197 swap_pager_freespace(backing_object
, p
->pindex
, 1);
2200 p
->pindex
< backing_offset_index
||
2201 new_pindex
>= object
->size
2204 * Page is out of the parent object's range, we
2205 * can simply destroy it.
2207 vm_page_protect(p
, VM_PROT_NONE
);
2212 pp
= vm_page_lookup(object
, new_pindex
);
2213 if (pp
!= NULL
|| vm_pager_has_page(object
, new_pindex
)) {
2215 * page already exists in parent OR swap exists
2216 * for this location in the parent. Destroy
2217 * the original page from the backing object.
2219 * Leave the parent's page alone
2221 vm_page_protect(p
, VM_PROT_NONE
);
2227 * Page does not exist in parent, rename the
2228 * page from the backing object to the main object.
2230 * If the page was mapped to a process, it can remain
2231 * mapped through the rename.
2233 if ((p
->queue
- p
->pc
) == PQ_CACHE
)
2234 vm_page_deactivate(p
);
2236 vm_page_rename(p
, object
, new_pindex
);
2238 /* page automatically made dirty by rename */

/*
 * This version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 *
 * The caller must hold the object and backing_object and both must be
 * chainlocked.
 *
 * (only called from vm_object_collapse)
 */
2254 vm_object_qcollapse(vm_object_t object
, vm_object_t backing_object
)
2256 if (backing_object
->ref_count
== 1) {
2257 atomic_add_int(&backing_object
->ref_count
, 2);
2258 #if defined(DEBUG_LOCKS)
2259 debugvm_object_add(backing_object
, "qcollapse", 1, 2);
2261 vm_object_backing_scan(object
, backing_object
,
2262 OBSC_COLLAPSE_NOWAIT
);
2263 atomic_add_int(&backing_object
->ref_count
, -2);
2264 #if defined(DEBUG_LOCKS)
2265 debugvm_object_add(backing_object
, "qcollapse", 2, -2);

/*
 * Collapse an object with the object backing it.  Pages in the backing
 * object are moved into the parent, and the backing object is deallocated.
 * Any conflict is resolved in favor of the parent's existing pages.
 *
 * object must be held and chain-locked on call.
 *
 * The caller must have an extra ref on object to prevent a race from
 * destroying it during the collapse.
 */
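
/*
 * Illustrative sketch (not compiled): the calling convention described above,
 * mirroring the way vm_object_deallocate_locked() drives a collapse of a
 * parent object 'temp'.  The dealloc list collects objects that must be
 * deallocated afterwards without recursing; the extra ref taken here and any
 * entries queued on dlist are released after the chain lock is dropped.
 */
#if 0
	struct vm_object_dealloc_list *dlist = NULL;

	vm_object_hold(temp);
	vm_object_chain_acquire(temp, 0);
	atomic_add_int(&temp->ref_count, 1);	/* extra ref, see above */
	vm_object_collapse(temp, &dlist);
	vm_object_chain_release(temp);
	vm_object_drop(temp);
#endif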
2281 vm_object_collapse(vm_object_t object
, struct vm_object_dealloc_list
**dlistp
)
2283 struct vm_object_dealloc_list
*dlist
= NULL
;
2284 vm_object_t backing_object
;
2287 * Only one thread is attempting a collapse at any given moment.
2288 * There are few restrictions for (object) that callers of this
2289 * function check so reentrancy is likely.
2291 KKASSERT(object
!= NULL
);
2292 vm_object_assert_held(object
);
2293 KKASSERT(object
->chainlk
& (CHAINLK_MASK
| CHAINLK_EXCL
));
2300 * We can only collapse a DEFAULT/SWAP object with a
2301 * DEFAULT/SWAP object.
2303 if (object
->type
!= OBJT_DEFAULT
&& object
->type
!= OBJT_SWAP
) {
2304 backing_object
= NULL
;
2308 backing_object
= object
->backing_object
;
2309 if (backing_object
== NULL
)
2311 if (backing_object
->type
!= OBJT_DEFAULT
&&
2312 backing_object
->type
!= OBJT_SWAP
) {
2313 backing_object
= NULL
;
2318 * Hold (token lock) the backing_object and retest conditions.
2320 vm_object_hold(backing_object
);
2321 if (backing_object
!= object
->backing_object
||
2322 (backing_object
->type
!= OBJT_DEFAULT
&&
2323 backing_object
->type
!= OBJT_SWAP
)) {
2324 vm_object_drop(backing_object
);
2329 * Chain-lock the backing object too because if we
2330 * successfully merge its pages into the top object we
2331 * will collapse backing_object->backing_object as the
2332 * new backing_object. Re-check that it is still our
2335 vm_object_chain_acquire(backing_object
, 0);
2336 if (backing_object
!= object
->backing_object
) {
2337 vm_object_chain_release(backing_object
);
2338 vm_object_drop(backing_object
);
2343 * We check the backing object first, because it is most
2344 * likely not collapsable.
2346 if (backing_object
->handle
!= NULL
||
2347 (backing_object
->type
!= OBJT_DEFAULT
&&
2348 backing_object
->type
!= OBJT_SWAP
) ||
2349 (backing_object
->flags
& OBJ_DEAD
) ||
2350 object
->handle
!= NULL
||
2351 (object
->type
!= OBJT_DEFAULT
&&
2352 object
->type
!= OBJT_SWAP
) ||
2353 (object
->flags
& OBJ_DEAD
)) {
2358 * If paging is in progress we can't do a normal collapse.
2360 if (object
->paging_in_progress
!= 0 ||
2361 backing_object
->paging_in_progress
!= 0
2363 vm_object_qcollapse(object
, backing_object
);
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			KKASSERT(object->backing_object == backing_object);
			vm_object_backing_scan(object, backing_object,
					       OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */
				vm_object_pip_add(object, 1);
				swap_pager_copy(backing_object, object,
				    OFF_TO_IDX(object->backing_object_offset),
				    TRUE);
				vm_object_pip_wakeup(object);
				vm_object_pip_wakeup(backing_object);
			}

			/*
			 * Object now shadows whatever backing_object did.
			 * Remove object from backing_object's shadow_list.
			 *
			 * Removing object from backing_object's shadow list
			 * requires releasing object, which we will do below.
			 */
			KKASSERT(object->backing_object == backing_object);
			if (object->flags & OBJ_ONSHADOW) {
				LIST_REMOVE(object, shadow_list);
				backing_object->shadow_count--;
				atomic_add_int(&backing_object->generation, 1);
				vm_object_clear_flag(object, OBJ_ONSHADOW);
			}

			/*
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 *
			 * OBJT_VNODE bbobj's should have empty shadow lists.
			 */
			while ((bbobj = backing_object->backing_object) != NULL) {
				if (bbobj->type == OBJT_VNODE)
					vm_object_hold_shared(bbobj);
				else
					vm_object_hold(bbobj);
				if (bbobj == backing_object->backing_object)
					break;
				vm_object_drop(bbobj);
			}

			/*
			 * We are removing backing_object from bbobj's
			 * shadow list and adding object to bbobj's shadow
			 * list, so the ref_count on bbobj is unchanged.
			 */
			if (bbobj) {
				if (backing_object->flags & OBJ_ONSHADOW) {
					/* not locked exclusively if vnode */
					KKASSERT(bbobj->type != OBJT_VNODE);
					LIST_REMOVE(backing_object,
						    shadow_list);
					bbobj->shadow_count--;
					atomic_add_int(&bbobj->generation, 1);
					vm_object_clear_flag(backing_object,
							     OBJ_ONSHADOW);
				}
				backing_object->backing_object = NULL;
			}
			object->backing_object = bbobj;
			if (bbobj) {
				if (bbobj->type != OBJT_VNODE) {
					LIST_INSERT_HEAD(&bbobj->shadow_head,
							 object, shadow_list);
					bbobj->shadow_count++;
					atomic_add_int(&bbobj->generation, 1);
					vm_object_set_flag(object,
							   OBJ_ONSHADOW);
				}
			}

			object->backing_object_offset +=
				backing_object->backing_object_offset;

			vm_object_drop(bbobj);

			/*
			 * Discard the old backing_object.  Nothing should be
			 * able to ref it, other than a vm_map_split(),
			 * and vm_map_split() will stall on our chain lock.
			 * And we control the parent so it shouldn't be
			 * possible for it to go away either.
			 *
			 * Since the backing object has no pages, no pager
			 * left, and no object references within it, all
			 * that is necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1,
				("backing_object %p was somehow "
				 "re-referenced during collapse!",
				 backing_object));
			KASSERT(RB_EMPTY(&backing_object->rb_memq),
				("backing_object %p somehow has left "
				 "over pages during collapse!",
				 backing_object));

			/*
			 * The object can be destroyed.
			 *
			 * XXX just fall through and dodealloc instead
			 *     of forcing destruction?
			 */
			atomic_add_int(&backing_object->ref_count, -1);
#if defined(DEBUG_LOCKS)
			debugvm_object_add(backing_object, "collapse", 1, -1);
#endif
			if ((backing_object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(backing_object);

			dodealloc = 0;
		} else {
			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, backing_object,
						OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * bbobj is backing_object->backing_object.  Since
			 * object completely shadows backing_object we can
			 * bypass it and become backed by bbobj instead.
			 *
			 * The shadow list for vnode backing objects is not
			 * used and a shared hold is allowed.
			 */
			while ((bbobj = backing_object->backing_object) != NULL) {
				if (bbobj->type == OBJT_VNODE)
					vm_object_hold_shared(bbobj);
				else
					vm_object_hold(bbobj);
				if (bbobj == backing_object->backing_object)
					break;
				vm_object_drop(bbobj);
			}

			/*
			 * Make object shadow bbobj instead of backing_object.
			 * Remove object from backing_object's shadow list.
			 *
			 * Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 *
			 * Removing object from backing_object's shadow
			 * list requires releasing a ref, which we do
			 * below by setting dodealloc to 1.
			 */
			KKASSERT(object->backing_object == backing_object);
			if (object->flags & OBJ_ONSHADOW) {
				LIST_REMOVE(object, shadow_list);
				backing_object->shadow_count--;
				atomic_add_int(&backing_object->generation, 1);
				vm_object_clear_flag(object, OBJ_ONSHADOW);
			}

			/*
			 * Add a ref to bbobj, bbobj now shadows object.
			 *
			 * NOTE: backing_object->backing_object still points
			 *	 to bbobj.  That relationship remains intact
			 *	 because backing_object has > 1 ref, so
			 *	 someone else is pointing to it (hence why
			 *	 we can't collapse it into object and can
			 *	 only handle the all-shadowed bypass case).
			 */
			if (bbobj) {
				if (bbobj->type != OBJT_VNODE) {
					vm_object_chain_wait(bbobj, 0);
					vm_object_reference_locked(bbobj);
					LIST_INSERT_HEAD(&bbobj->shadow_head,
							 object, shadow_list);
					bbobj->shadow_count++;
					atomic_add_int(&bbobj->generation, 1);
					vm_object_set_flag(object,
							   OBJ_ONSHADOW);
				} else {
					vm_object_reference_quick(bbobj);
				}
				object->backing_object_offset +=
					backing_object->backing_object_offset;
				object->backing_object = bbobj;
				vm_object_drop(bbobj);
			} else {
				object->backing_object = NULL;
			}

			/*
			 * Drop the reference count on backing_object.  To
			 * handle ref_count races properly we can't assume
			 * that the ref_count is still at least 2 so we
			 * have to actually call vm_object_deallocate()
			 * (after clearing the chainlock).
			 */
			dodealloc = 1;
		}

		/*
		 * Ok, we want to loop on the new object->bbobj association,
		 * possibly collapsing it further.  However if dodealloc is
		 * non-zero we have to deallocate the backing_object which
		 * itself can potentially undergo a collapse, creating a
		 * recursion depth issue with the LWKT token subsystem.
		 *
		 * In the case where we must deallocate the backing_object
		 * it is possible now that the backing_object has a single
		 * shadow count on some other object (not represented here
		 * as yet), since it no longer shadows us.  Thus when we
		 * call vm_object_deallocate() it may attempt to collapse
		 * itself into its remaining parent.
		 */
		if (dodealloc) {
			struct vm_object_dealloc_list *dtmp;

			vm_object_chain_release(backing_object);
			vm_object_unlock(backing_object);
			/* backing_object remains held */

			/*
			 * Auto-deallocation list for caller convenience.
			 */
			if (dlistp == NULL)
				dlistp = &dlist;

			dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK);
			dtmp->object = backing_object;
			dtmp->next = *dlistp;
			*dlistp = dtmp;
		} else {
			vm_object_chain_release(backing_object);
			vm_object_drop(backing_object);
		}
		/* backing_object = NULL; not needed */
	}

	/*
	 * Clean up any left over backing_object
	 */
	if (backing_object) {
		vm_object_chain_release(backing_object);
		vm_object_drop(backing_object);
	}

	/*
	 * Clean up any auto-deallocation list.  This is a convenience
	 * for top-level callers so they don't have to pass &dlist.
	 * Do not clean up any caller-passed dlistp, the caller will
	 * do that on its own.
	 */
	if (dlist)
		vm_object_deallocate_list(&dlist);
}

/*
 * vm_object_collapse() may collect additional objects in need of
 * deallocation.  This routine deallocates these objects.  The
 * deallocation itself can trigger additional collapses (which the
 * deallocate function takes care of).  This procedure is used to
 * reduce procedural recursion since these vm_object shadow chains
 * can become quite long.
 */
void
vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp)
{
	struct vm_object_dealloc_list *dlist;

	while ((dlist = *dlistp) != NULL) {
		*dlistp = dlist->next;
		vm_object_lock(dlist->object);
		vm_object_deallocate_locked(dlist->object);
		vm_object_drop(dlist->object);
		kfree(dlist, M_TEMP);
	}
}
/*
 * Removes all physical pages in the specified object range from the
 * object's list of pages.
 */
static int vm_object_page_remove_callback(vm_page_t p, void *data);

void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		      boolean_t clean_only)
{
	struct rb_vm_page_scan_info info;
	int all;

	/*
	 * Degenerate cases and assertions
	 */
	vm_object_hold(object);
	if (object == NULL ||
	    (object->resident_page_count == 0 && object->swblock_count == 0)) {
		vm_object_drop(object);
		return;
	}
	KASSERT(object->type != OBJT_PHYS,
		("attempt to remove pages from a physical object"));

	/*
	 * Indicate that paging is occurring on the object
	 */
	vm_object_pip_add(object, 1);

	/*
	 * Figure out the actual removal range and whether we are removing
	 * the entire contents of the object or not.  If removing the entire
	 * contents, be sure to get all pages, even those that might be
	 * beyond the end of the object.
	 */
	info.object = object;
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = (vm_pindex_t)-1;
	else
		info.end_pindex = end - 1;
	info.limit = clean_only;
	info.count = 0;
	all = (start == 0 && info.end_pindex >= object->size - 1);

	/*
	 * Loop until we are sure we have gotten them all.
	 */
	do {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_remove_callback, &info);
	} while (info.error);

	/*
	 * Remove any related swap if throwing away pages, or for
	 * non-swap objects (the swap is a clean copy in that case).
	 */
	if (object->type != OBJT_SWAP || clean_only == FALSE) {
		if (all)
			swap_pager_freespace_all(object);
		else
			swap_pager_freespace(object, info.start_pindex,
				info.end_pindex - info.start_pindex + 1);
	}

	/*
	 * Cleanup
	 */
	vm_object_pip_wakeup(object);
	vm_object_drop(object);
}
/*
 * The caller must hold the object.
 *
 * NOTE: User yields are allowed when removing more than one page, but not
 *	 allowed if only removing one page (the path for single page removals
 *	 might hold a spinlock).
 */
static int
vm_object_page_remove_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (info->object != p->object ||
	    p->pindex < info->start_pindex ||
	    p->pindex > info->end_pindex) {
		kprintf("vm_object_page_remove_callbackA: obj/pg race %p/%p\n",
			info->object, p);
		return(0);
	}
	if (vm_page_busy_try(p, TRUE)) {
		vm_page_sleep_busy(p, TRUE, "vmopar");
		info->error = 1;
		return(0);
	}
	if (info->object != p->object) {
		/* this should never happen */
		kprintf("vm_object_page_remove_callbackB: obj/pg race %p/%p\n",
			info->object, p);
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * Wired pages cannot be destroyed, but they can be invalidated
	 * and we do so if clean_only (limit) is not set.
	 *
	 * WARNING!  The page may be wired due to being part of a buffer
	 *	     cache buffer, and the buffer might be marked B_CACHE.
	 *	     This is fine as part of a truncation but VFSs must be
	 *	     sure to fix the buffer up when re-extending the file.
	 *
	 * NOTE!     PG_NEED_COMMIT is ignored.
	 */
	if (p->wire_count != 0) {
		vm_page_protect(p, VM_PROT_NONE);
		if (info->limit == 0)
			p->valid = 0;
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * limit is our clean_only flag.  If set and the page is dirty or
	 * requires a commit, do not free it.  If set and the page is being
	 * held by someone, do not free it.
	 */
	if (info->limit && p->valid) {
		vm_page_test_dirty(p);
		if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
			vm_page_wakeup(p);
			goto done;
		}
	}

	/*
	 * Destroy the page.
	 */
	vm_page_protect(p, VM_PROT_NONE);
	vm_page_free(p);

	/*
	 * Must be at end to avoid SMP races, caller holds object token
	 */
done:
	if ((++info->count & 63) == 0)
		lwkt_user_yield();
	return(0);
}

/*
 * Try to extend prev_object into an adjoining region of virtual
 * memory, return TRUE on success.
 *
 * The caller does not need to hold (prev_object) but must have a stable
 * pointer to it (typically by holding the vm_map locked).
 *
 * This function only works for anonymous memory objects which either
 * have (a) one reference or (b) we are extending the object's size.
 * Otherwise the related VM pages we want to use for the object might
 * be in use by another mapping.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
		   vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);

	vm_object_hold(prev_object);

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_chain_acquire(prev_object, 0);
	vm_object_collapse(prev_object, NULL);

	/*
	 * We can't coalesce if we shadow another object (figuring out the
	 * relationships becomes too complex).
	 */
	if (prev_object->backing_object != NULL) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	/*
	 * We can't if the object has more than one ref count unless we
	 * are extending it into newly minted space.
	 */
	if (prev_object->ref_count > 1 &&
	    prev_object->size != next_pindex) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	vm_object_chain_release(prev_object);
	vm_object_drop(prev_object);

	return (TRUE);
}
/*
 * Make the object writable and flag it as being possibly dirty.
 *
 * The object might not be held (or might be held but held shared),
 * the related vnode is probably not held either.  Object and vnode are
 * stable by virtue of the vm_page busied by the caller preventing
 * destruction.
 *
 * If the related mount is flagged MNTK_THR_SYNC we need to call
 * vsetobjdirty().  Filesystems using this option usually shortcut
 * synchronization by only scanning the syncer list.
 */
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	/*vm_object_assert_held(object);*/
	/*
	 * Avoid contention in vm fault path by checking the state before
	 * issuing an atomic op on it.
	 */
	if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
	    (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
		vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		if ((vp->v_flag & VOBJDIRTY) == 0) {
			if (vp->v_mount &&
			    (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) {
				/*
				 * New style THR_SYNC places vnodes on the
				 * syncer list more deterministically.
				 */
				vsetobjdirty(vp);
			} else {
				/*
				 * Old style scan would not necessarily place
				 * a vnode on the syncer list when possibly
				 * modified via mmap.
				 */
				vsetflags(vp, VOBJDIRTY);
			}
		}
	}
}
#include "opt_ddb.h"
#ifdef DDB
#include <sys/cons.h>

#include <ddb/ddb.h>

static int _vm_object_in_map (vm_map_t map, vm_object_t object,
			      vm_map_entry_t entry);
static int vm_object_in_map (vm_object_t object);

/*
 * The caller must hold the object.
 */
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj, nobj;
	int entcount;

	if (map == NULL)
		return 0;
	if (entry == NULL) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if( _vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		return (0);
	}
	switch(entry->maptype) {
	case VM_MAPTYPE_SUBMAP:
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if( _vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		break;
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		obj = entry->object.vm_object;
		while (obj) {
			if (obj == object) {
				if (obj != entry->object.vm_object)
					vm_object_drop(obj);
				return 1;
			}
			while ((nobj = obj->backing_object) != NULL) {
				vm_object_hold(nobj);
				if (nobj == obj->backing_object)
					break;
				vm_object_drop(nobj);
			}
			if (obj != entry->object.vm_object) {
				if (nobj)
					vm_object_lock_swap();
				vm_object_drop(obj);
			}
			obj = nobj;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vm_object_in_map_callback(struct proc *p, void *data);

struct vm_object_in_map_info {
	vm_object_t object;
	int rv;
};

static int
vm_object_in_map(vm_object_t object)
{
	struct vm_object_in_map_info info;

	info.rv = 0;
	info.object = object;

	allproc_scan(vm_object_in_map_callback, &info, 0);
	if (info.rv)
		return 1;
	if( _vm_object_in_map(&kernel_map, object, 0))
		return 1;
	if( _vm_object_in_map(&pager_map, object, 0))
		return 1;
	if( _vm_object_in_map(&buffer_map, object, 0))
		return 1;
	return 0;
}

static int
vm_object_in_map_callback(struct proc *p, void *data)
{
	struct vm_object_in_map_info *info = data;

	if (p->p_vmspace) {
		if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
			info->rv = 1;
			return -1;
		}
	}
	return (0);
}
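
/*
 * The DB_SHOW_COMMAND() entries below are reached from the in-kernel
 * debugger prompt via the standard ddb "show" dispatch, e.g. "show vmochk",
 * "show object <addr>", and "show vmopag" (see the ddb documentation for
 * invocation details; this note is editorial, not part of the original
 * commentary).
 */
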
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	struct vm_object_hash *hash;
	vm_object_t object;
	int n;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (n = 0; n < VMOBJ_HSIZE; ++n) {
		hash = &vm_object_hash[n];
		for (object = TAILQ_FIRST(&hash->list);
		     object != NULL;
		     object = TAILQ_NEXT(object, object_list)) {
			if (object->type == OBJT_MARKER)
				continue;
			if (object->handle != NULL ||
			    (object->type != OBJT_DEFAULT &&
			     object->type != OBJT_SWAP)) {
				continue;
			}
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has "
					  "zero ref count: %ld\n",
					  (long)object->size);
			}
			if (vm_object_in_map(object))
				continue;
			db_printf("vmochk: internal obj is not in a map: "
				  "ref: %d, size: %lu: 0x%lx, "
				  "backing_object: %p\n",
				  object->ref_count, (u_long)object->size,
				  (u_long)object->size,
				  (void *)object->backing_object);
		}
	}
}

DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%ld, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/*
 * XXX need this non-static entry for calling from vm_map_print.
 */
void
vm_object_print(/* db_expr_t */ long addr,
		boolean_t have_addr,
		/* db_expr_t */ long count,
		char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	struct vm_object_hash *hash;
	vm_object_t object;
	int nl = 0;
	int c;
	int n;

	for (n = 0; n < VMOBJ_HSIZE; ++n) {
		hash = &vm_object_hash[n];
		for (object = TAILQ_FIRST(&hash->list);
		     object != NULL;
		     object = TAILQ_NEXT(object, object_list)) {
			vm_pindex_t idx, fidx;
			vm_pindex_t osize;
			vm_paddr_t pa = -1, padiff;
			int rcount;
			vm_page_t m;

			if (object->type == OBJT_MARKER)
				continue;
			db_printf("new object: %p\n", (void *)object);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
			rcount = 0;
			fidx = 0;
			osize = object->size;

			for (idx = 0; idx < osize; idx++) {
				m = vm_page_lookup(object, idx);
				if (m == NULL) {
					if (rcount) {
						db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
							(long)fidx, rcount, (long)pa);
						if (nl > 18) {
							c = cngetc();
							if (c != ' ')
								return;
							nl = 0;
						}
						nl++;
						rcount = 0;
					}
					continue;
				}

				if (rcount &&
				    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
					++rcount;
					continue;
				}
				if (rcount) {
					padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
					padiff >>= PAGE_SHIFT;
					padiff &= PQ_L2_MASK;
					if (padiff == 0) {
						pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
						++rcount;
						continue;
					}
					db_printf(" index(%ld)run(%d)pa(0x%lx)",
						(long)fidx, rcount, (long)pa);
					db_printf("pd(%ld)\n", (long)padiff);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
				}
				fidx = idx;
				pa = VM_PAGE_TO_PHYS(m);
				rcount = 1;
			}
			if (rcount) {
				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					(long)fidx, rcount, (long)pa);