/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 */

/*
 *	Virtual memory object module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#define EASY_SCAN_FACTOR	8

static void	vm_object_qcollapse(vm_object_t object,
				    vm_object_t backing_object);
static void	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
					     int pagerflags);
static void	vm_object_lock_init(vm_object_t);
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 */
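/*
 * Illustrative lifecycle sketch (hypothetical caller, showing the
 * reference rules above): an object is created with one reference and
 * is reclaimed when the last reference is released.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
 *	...					- obj->ref_count is 1 here
 *	vm_object_deallocate(obj);		- 1->0, object is terminated
 */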
struct object_q vm_object_list;		/* locked by vmobj_token */
struct vm_object kernel_object;

static long vm_object_count;		/* locked by vmobj_token */
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
/*
 * Misc low level routines
 */
static void
vm_object_lock_init(vm_object_t obj)
{
#if defined(DEBUG_LOCKS)
	int i;

	obj->debug_hold_bitmap = 0;
	obj->debug_hold_ovfl = 0;
	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
		obj->debug_hold_thrs[i] = NULL;
		obj->debug_hold_file[i] = NULL;
		obj->debug_hold_line[i] = 0;
	}
#endif
}
void
vm_object_lock_swap(void)
{
	lwkt_token_swap();
}

void
vm_object_lock(vm_object_t obj)
{
	lwkt_gettoken(&obj->token);
}

/*
 * Returns TRUE on success
 */
int
vm_object_lock_try(vm_object_t obj)
{
	return(lwkt_trytoken(&obj->token));
}

void
vm_object_lock_shared(vm_object_t obj)
{
	lwkt_gettoken_shared(&obj->token);
}

void
vm_object_unlock(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
}

void
vm_object_assert_held(vm_object_t obj)
{
	ASSERT_LWKT_TOKEN_HELD(&obj->token);
}
void
#ifndef DEBUG_LOCKS
vm_object_hold(vm_object_t obj)
#else
debugvm_object_hold(vm_object_t obj, char *file, int line)
#endif
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to callers
	 * context, typically already holding the token on a parent object)
	 * prior to potentially blocking on the lock, otherwise the object
	 * can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock(obj);

#if defined(DEBUG_LOCKS)
	int i;
	u_int mask;

	for (;;) {
		mask = ~obj->debug_hold_bitmap;
		cpu_ccfence();
		if (mask == 0xFFFFFFFFU) {
			if (obj->debug_hold_ovfl == 0)
				obj->debug_hold_ovfl = 1;
			break;
		}
		i = ffs(mask) - 1;
		if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask,
				      ~mask | (1 << i))) {
			obj->debug_hold_bitmap |= (1 << i);
			obj->debug_hold_thrs[i] = curthread;
			obj->debug_hold_file[i] = file;
			obj->debug_hold_line[i] = line;
			break;
		}
	}
#endif
}
int
#ifndef DEBUG_LOCKS
vm_object_hold_try(vm_object_t obj)
#else
debugvm_object_hold_try(vm_object_t obj, char *file, int line)
#endif
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to callers
	 * context, typically already holding the token on a parent object)
	 * prior to potentially blocking on the lock, otherwise the object
	 * can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	if (vm_object_lock_try(obj) == 0) {
		if (refcount_release(&obj->hold_count)) {
			if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
				zfree(obj_zone, obj);
		}
		return(0);
	}

#if defined(DEBUG_LOCKS)
	int i;
	u_int mask;

	for (;;) {
		mask = ~obj->debug_hold_bitmap;
		cpu_ccfence();
		if (mask == 0xFFFFFFFFU) {
			if (obj->debug_hold_ovfl == 0)
				obj->debug_hold_ovfl = 1;
			break;
		}
		i = ffs(mask) - 1;
		if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask,
				      ~mask | (1 << i))) {
			obj->debug_hold_bitmap |= (1 << i);
			obj->debug_hold_thrs[i] = curthread;
			obj->debug_hold_file[i] = file;
			obj->debug_hold_line[i] = line;
			break;
		}
	}
#endif
	return(1);
}
void
#ifndef DEBUG_LOCKS
vm_object_hold_shared(vm_object_t obj)
#else
debugvm_object_hold_shared(vm_object_t obj, char *file, int line)
#endif
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to callers
	 * context, typically already holding the token on a parent object)
	 * prior to potentially blocking on the lock, otherwise the object
	 * can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock_shared(obj);

#if defined(DEBUG_LOCKS)
	int i;
	u_int mask;

	for (;;) {
		mask = ~obj->debug_hold_bitmap;
		cpu_ccfence();
		if (mask == 0xFFFFFFFFU) {
			if (obj->debug_hold_ovfl == 0)
				obj->debug_hold_ovfl = 1;
			break;
		}
		i = ffs(mask) - 1;
		if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask,
				      ~mask | (1 << i))) {
			obj->debug_hold_bitmap |= (1 << i);
			obj->debug_hold_thrs[i] = curthread;
			obj->debug_hold_file[i] = file;
			obj->debug_hold_line[i] = line;
			break;
		}
	}
#endif
}
/*
 * Drop the token and hold_count on the object.
 */
void
vm_object_drop(vm_object_t obj)
{
	if (obj == NULL)
		return;

#if defined(DEBUG_LOCKS)
	int found = 0;
	int i;

	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
		if ((obj->debug_hold_bitmap & (1 << i)) &&
		    (obj->debug_hold_thrs[i] == curthread)) {
			obj->debug_hold_bitmap &= ~(1 << i);
			obj->debug_hold_thrs[i] = NULL;
			obj->debug_hold_file[i] = NULL;
			obj->debug_hold_line[i] = 0;
			found = 1;
			break;
		}
	}

	if (found == 0 && obj->debug_hold_ovfl == 0)
		panic("vm_object: attempt to drop hold on non-self-held obj");
#endif

	/*
	 * No new holders should be possible once we drop hold_count 1->0 as
	 * there is no longer any way to reference the object.
	 */
	KKASSERT(obj->hold_count > 0);
	if (refcount_release(&obj->hold_count)) {
		if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) {
			vm_object_unlock(obj);
			zfree(obj_zone, obj);
		} else {
			vm_object_unlock(obj);
		}
	} else {
		vm_object_unlock(obj);
	}
}
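/*
 * Illustrative hold/drop pairing (hypothetical caller): every
 * vm_object_hold() must be matched by a vm_object_drop().  The hold
 * keeps a dead object's memory from being zfree()d while we block
 * acquiring its token.
 *
 *	vm_object_hold(obj);		- gains hold_count plus the token
 *	... inspect or modify obj ...
 *	vm_object_drop(obj);		- releases token, may free dead obj
 */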
/*
 * Initialize a freshly allocated object, returning a held object.
 *
 * Used only by vm_object_allocate() and zinitna().
 *
 * No requirements.
 */
static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
	int incr;

	RB_INIT(&object->rb_memq);
	LIST_INIT(&object->shadow_head);
	lwkt_token_init(&object->token, "vmobj");

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->hold_count = 0;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->agg_pv_list_count = 0;
	object->shadow_count = 0;
	/* cpu localization twist */
	object->pg_color = (int)(intptr_t)curthread;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t)0;

	object->generation++;
	object->swblock_count = 0;
	RB_INIT(&object->swblock_root);
	vm_object_lock_init(object);
	pmap_object_init(object);

	vm_object_hold(object);
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	lwkt_reltoken(&vmobj_token);
}
/*
 * Initialize the VM objects module.
 *
 * Called from the low level boot code only.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);

	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
			    &kernel_object);
	vm_object_drop(&kernel_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
		  vm_objects_init, VM_OBJECTS_INIT);
}
void
vm_object_init2(void)
{
	zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
}
/*
 * Allocate and return a new object of the specified type and size.
 *
 * No requirements.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);
	vm_object_drop(result);

	return (result);
}
/*
 * This version returns a held object, allowing further atomic initialization
 * of the object.
 */
vm_object_t
vm_object_allocate_hold(objtype_t type, vm_pindex_t size)
{
	vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}
/*
 * Add an additional reference to a vm_object.  The object must already be
 * held.  The original non-lock version is no longer supported.  The object
 * must NOT be chain locked by anyone at the time the reference is added.
 *
 * Referencing a chain-locked object can blow up the fairly sensitive
 * ref_count and shadow_count tests in the deallocator.  Most callers
 * will call vm_object_chain_wait() prior to calling
 * vm_object_reference_locked() to avoid the case.
 *
 * The object must be held.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	KKASSERT(object != NULL);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	KKASSERT((object->flags & OBJ_CHAINLOCK) == 0);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vref(object->handle);
		/* XXX what if the vnode is being destroyed? */
	}
}
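/*
 * Illustrative sketch of the pattern described above (hypothetical
 * caller): wait out any chain lock before adding the reference so the
 * deallocator's ref_count/shadow_count tests are not disturbed.
 *
 *	vm_object_hold(object);
 *	vm_object_chain_wait(object);		- OBJ_CHAINLOCK now clear
 *	vm_object_reference_locked(object);
 *	vm_object_drop(object);
 */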
/*
 * Object OBJ_CHAINLOCK lock handling.
 *
 * The caller can chain-lock backing objects recursively and then
 * use vm_object_chain_release_all() to undo the whole chain.
 *
 * Chain locks are used to prevent collapses and are only applicable
 * to OBJT_DEFAULT and OBJT_SWAP objects.  Chain locking operations
 * on other object types are ignored.  This is also important because
 * it allows e.g. the vnode underlying a memory mapping to take concurrent
 * page faults.
 *
 * The object must usually be held on entry, though intermediate
 * objects need not be held on release.
 */
void
vm_object_chain_wait(vm_object_t object)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	while (object->flags & OBJ_CHAINLOCK) {
		vm_object_set_flag(object, OBJ_CHAINWANT);
		tsleep(object, 0, "objchain", 0);
	}
}
void
vm_object_chain_acquire(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) {
		vm_object_chain_wait(object);
		vm_object_set_flag(object, OBJ_CHAINLOCK);
	}
}

void
vm_object_chain_release(vm_object_t object)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	if (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) {
		KKASSERT(object->flags & OBJ_CHAINLOCK);
		if (object->flags & OBJ_CHAINWANT) {
			vm_object_clear_flag(object,
					     OBJ_CHAINLOCK | OBJ_CHAINWANT);
			wakeup(object);
		} else {
			vm_object_clear_flag(object, OBJ_CHAINLOCK);
		}
	}
}
/*
 * This releases the entire chain of objects from first_object to and
 * including stopobj, flowing through object->backing_object.
 *
 * We release stopobj first as an optimization as this object is most
 * likely to be shared across multiple processes.
 */
void
vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj)
{
	vm_object_t backing_object;
	vm_object_t object;

	vm_object_chain_release(stopobj);
	object = first_object;

	while (object != stopobj) {
		KKASSERT(object);
		if (object != first_object)
			vm_object_hold(object);
		backing_object = object->backing_object;
		vm_object_chain_release(object);
		if (object != first_object)
			vm_object_drop(object);
		object = backing_object;
	}
}
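/*
 * Illustrative sketch (hypothetical caller): chain-lock down a shadow
 * chain and then undo the whole chain in one call, per the description
 * above.  Intermediate objects are re-held internally on release.
 *
 *	vm_object_chain_acquire(object);
 *	vm_object_chain_acquire(object->backing_object);
 *	...
 *	vm_object_chain_release_all(object, stopobj);
 */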
/*
 * Dereference an object and its underlying vnode.
 *
 * The object must be held and will be held on return.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif
	object->ref_count--;
	if (object->ref_count == 0)
		vclrflags(vp, VTEXT);
	vrele(vp);
}
/*
 * Release a reference to the specified object, gained either through a
 * vm_object_allocate or a vm_object_reference call.  When all references
 * are gone, storage associated with this object may be relinquished.
 *
 * The caller does not have to hold the object locked but must have control
 * over the reference in question in order to guarantee that the object
 * does not get ripped out from under us.
 */
void
vm_object_deallocate(vm_object_t object)
{
	if (object) {
		vm_object_hold(object);
		vm_object_deallocate_locked(object);
		vm_object_drop(object);
	}
}
void
vm_object_deallocate_locked(vm_object_t object)
{
	struct vm_object_dealloc_list *dlist = NULL;
	struct vm_object_dealloc_list *dtmp;
	vm_object_t temp;
	int must_drop = 0;

	/*
	 * We may chain deallocate object, but additional objects may
	 * collect on the dlist which also have to be deallocated.  We
	 * must avoid a recursion, vm_object chains can get deep.
	 */
again:
	while (object != NULL) {
		/*
		 * Don't rip a ref_count out from under an object undergoing
		 * collapse, it will confuse the collapse code.
		 */
		vm_object_chain_wait(object);

		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			break;
		}

		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated "
			      "too many times: %d", object->type);
		}
		if (object->ref_count > 2) {
			object->ref_count--;
			break;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 *
		 * Nominal ref_count > 1 case if the second ref is not from
		 * a shadow.
		 */
		if (object->ref_count == 2 && object->shadow_count == 0) {
			vm_object_set_flag(object, OBJ_ONEMAPPING);
			object->ref_count--;
			break;
		}

		/*
		 * If the second ref is from a shadow we chain along it
		 * upwards if object's handle is exhausted.
		 *
		 * We have to decrement object->ref_count before potentially
		 * collapsing the first shadow object or the collapse code
		 * will not be able to handle the degenerate case to remove
		 * object.  However, if we do it too early the object can
		 * get ripped out from under us.
		 */
		if (object->ref_count == 2 && object->shadow_count == 1 &&
		    object->handle == NULL && (object->type == OBJT_DEFAULT ||
					       object->type == OBJT_SWAP)) {
			temp = LIST_FIRST(&object->shadow_head);
			KKASSERT(temp != NULL);
			vm_object_hold(temp);

			/*
			 * Wait for any paging to complete so the collapse
			 * doesn't (or isn't likely to) qcollapse.  pip
			 * waiting must occur before we acquire the
			 * chainlock.
			 */
			while (
				temp->paging_in_progress ||
				object->paging_in_progress
			) {
				vm_object_pip_wait(temp, "objde1");
				vm_object_pip_wait(object, "objde2");
			}

			/*
			 * If the parent is locked we have to give up, as
			 * otherwise we would be acquiring locks in the
			 * wrong order and potentially deadlock.
			 */
			if (temp->flags & OBJ_CHAINLOCK) {
				vm_object_drop(temp);
				goto skip;
			}
			vm_object_chain_acquire(temp);

			/*
			 * Recheck/retry after the hold and the paging
			 * wait, both of which can block us.
			 */
			if (object->ref_count != 2 ||
			    object->shadow_count != 1 ||
			    object->handle ||
			    LIST_FIRST(&object->shadow_head) != temp ||
			    (object->type != OBJT_DEFAULT &&
			     object->type != OBJT_SWAP)) {
				vm_object_chain_release(temp);
				vm_object_drop(temp);
				continue;
			}

			/*
			 * We can safely drop object's ref_count now.
			 */
			KKASSERT(object->ref_count == 2);
			object->ref_count--;

			/*
			 * If our single parent is not collapseable just
			 * decrement ref_count (2->1) and stop.
			 */
			if (temp->handle || (temp->type != OBJT_DEFAULT &&
					     temp->type != OBJT_SWAP)) {
				vm_object_chain_release(temp);
				vm_object_drop(temp);
				break;
			}

			/*
			 * At this point we have already dropped object's
			 * ref_count so it is possible for a race to
			 * deallocate obj out from under us.  Any collapse
			 * will re-check the situation.  We must not block
			 * until we are able to collapse.
			 *
			 * Bump temp's ref_count to avoid an unwanted
			 * degenerate recursion (can't call
			 * vm_object_reference_locked() because it asserts
			 * that CHAINLOCK is not set).
			 */
			temp->ref_count++;
			KKASSERT(temp->ref_count > 1);

			/*
			 * Collapse temp, then deallocate the extra ref
			 * formally.
			 */
			vm_object_collapse(temp, &dlist);
			vm_object_chain_release(temp);
			if (must_drop) {
				vm_object_lock_swap();
				vm_object_drop(object);
			}
			object = temp;
			must_drop = 1;
			continue;
		}

		/*
		 * Drop the ref and handle termination on the 1->0 transition.
		 * We may have blocked above so we have to recheck.
		 */
skip:
		KKASSERT(object->ref_count != 0);
		if (object->ref_count >= 2) {
			object->ref_count--;
			break;
		}
		KKASSERT(object->ref_count == 1);

		/*
		 * 1->0 transition.  Chain through the backing_object.
		 * Maintain the ref until we've located the backing object,
		 * then re-check.
		 */
		while ((temp = object->backing_object) != NULL) {
			vm_object_hold(temp);
			if (temp == object->backing_object)
				break;
			vm_object_drop(temp);
		}

		/*
		 * 1->0 transition verified, retry if ref_count is no longer
		 * 1.  Otherwise disconnect the backing_object (temp) and
		 * clean up.
		 */
		if (object->ref_count != 1) {
			vm_object_drop(temp);
			continue;
		}

		/*
		 * It shouldn't be possible for the object to be chain locked
		 * if we're removing the last ref on it.
		 */
		KKASSERT((object->flags & OBJ_CHAINLOCK) == 0);

		if (temp) {
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			object->backing_object = NULL;
		}

		--object->ref_count;
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		if (must_drop && temp)
			vm_object_lock_swap();
		if (must_drop)
			vm_object_drop(object);
		object = temp;
		must_drop = 1;
	}
	if (must_drop && object)
		vm_object_drop(object);

	/*
	 * Additional tail recursion on dlist.  Avoid a recursion.  Objects
	 * on the dlist have a hold count but are not locked.
	 */
	if ((dtmp = dlist) != NULL) {
		dlist = dtmp->next;
		object = dtmp->object;
		kfree(dtmp, M_TEMP);

		vm_object_lock(object);	/* already held, add lock */
		must_drop = 1;		/* and we're responsible for it */
		goto again;
	}
}
/*
 * Destroy the specified object, freeing up related resources.
 *
 * The object must have zero references.
 *
 * The object must held.  The caller is responsible for dropping the object
 * after terminate returns.  Terminate does NOT drop the object.
 */
static int vm_object_terminate_callback(vm_page_t p, void *data);

void
vm_object_terminate(vm_object_t object)
{
	/*
	 * Make sure no one uses us.  Once we set OBJ_DEAD we should be
	 * able to safely block.
	 */
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	KKASSERT((object->flags & OBJ_DEAD) == 0);
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * Wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm1");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Clean pages and flush buffers.
		 *
		 * NOTE!  TMPFS buffer flushes do not typically flush the
		 *	  actual page to swap as this would be highly
		 *	  inefficient, and normal filesystems usually wrap
		 *	  page flushes with buffer cache buffers.
		 *
		 *	  To deal with this we have to call vinvalbuf() both
		 *	  before and after the vm_object_page_clean().
		 */
		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, 0, 0);
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * Wait for any I/O to complete, after which there had better not
	 * be any references left on the object.
	 */
	vm_object_pip_wait(object, "objtrm2");

	if (object->ref_count != 0) {
		panic("vm_object_terminate: object with references, "
		      "ref_count=%d", object->ref_count);
	}

	/*
	 * Cleanup any shared pmaps associated with this object.
	 */
	pmap_object_free(object);

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
				vm_object_terminate_callback, NULL);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Wait for the object hold count to hit 1, clean out pages as
	 * we go.  vmobj_token interlocks any race conditions that might
	 * pick the object up from the vm_object_list after we have cleared
	 * rb_memq.
	 */
	for (;;) {
		if (RB_ROOT(&object->rb_memq) == NULL)
			break;
		kprintf("vm_object_terminate: Warning, object %p "
			"still has %d pages\n",
			object, object->resident_page_count);
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
					vm_object_terminate_callback, NULL);
	}

	/*
	 * There had better not be any pages left
	 */
	KKASSERT(object->resident_page_count == 0);

	/*
	 * Remove the object from the global object list.
	 */
	lwkt_gettoken(&vmobj_token);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	lwkt_reltoken(&vmobj_token);
	vm_object_dead_wakeup(object);

	if (object->ref_count != 0) {
		panic("vm_object_terminate2: object with references, "
		      "ref_count=%d", object->ref_count);
	}

	/*
	 * NOTE: The object hold_count is at least 1, so we cannot zfree()
	 *	 the object here.  See vm_object_drop().
	 */
}
1000 vm_object_terminate_callback(vm_page_t p
, void *data __unused
)
1005 vm_page_busy_wait(p
, TRUE
, "vmpgtrm");
1006 if (object
!= p
->object
) {
1007 kprintf("vm_object_terminate: Warning: Encountered "
1008 "busied page %p on queue %d\n", p
, p
->queue
);
1010 } else if (p
->wire_count
== 0) {
1012 * NOTE: p->dirty and PG_NEED_COMMIT are ignored.
1015 mycpu
->gd_cnt
.v_pfree
++;
1017 if (p
->queue
!= PQ_NONE
)
1018 kprintf("vm_object_terminate: Warning: Encountered "
1019 "wired page %p on queue %d\n", p
, p
->queue
);
/*
 * The object is dead but still has an object<->pager association.  Sleep
 * and return.  The caller typically retests the association in a loop.
 *
 * The caller must hold the object.
 */
void
vm_object_dead_sleep(vm_object_t object, const char *wmesg)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	if (object->handle) {
		vm_object_set_flag(object, OBJ_DEADWNT);
		tsleep(object, 0, wmesg, 0);
		/* object may be invalid after this point */
	}
}
/*
 * Wakeup anyone waiting for the object<->pager disassociation on
 * a dead object.
 *
 * The caller must hold the object.
 */
void
vm_object_dead_wakeup(vm_object_t object)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	if (object->flags & OBJ_DEADWNT) {
		vm_object_clear_flag(object, OBJ_DEADWNT);
		wakeup(object);
	}
}
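/*
 * Illustrative retest loop (hypothetical caller): sleep until the
 * object<->pager association is gone, re-testing after every wakeup
 * as described above.
 *
 *	while ((object->flags & OBJ_DEAD) && object->handle)
 *		vm_object_dead_sleep(object, "objdead");
 */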
/*
 * Clean all dirty pages in the specified range of object.  Leaves page
 * on whatever queue it is currently on.   If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * When stuffing pages asynchronously, allow clustering.  XXX we need a
 * synchronous clustering mode implementation.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked? XXX
 */
static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
static int vm_object_page_clean_pass2(struct vm_page *p, void *data);

void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		     int flags)
{
	struct rb_vm_page_scan_info info;
	struct vnode *vp;
	int wholescan;
	int pagerflags;
	int generation;

	vm_object_hold(object);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
		vm_object_drop(object);
		return;
	}

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	/*
	 * Interlock other major object operations.  This allows us to
	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
	 */
	vm_object_set_flag(object, OBJ_CLEANING);

	/*
	 * Handle 'entire object' case
	 */
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = object->size - 1;
	else
		info.end_pindex = end - 1;
	wholescan = (start == 0 && info.end_pindex == object->size - 1);
	info.limit = flags;
	info.pagerflags = pagerflags;
	info.object = object;

	/*
	 * If cleaning the entire object do a pass to mark the pages read-only.
	 * If everything worked out ok, clear OBJ_WRITEABLE and
	 * OBJ_MIGHTBEDIRTY.
	 */
	if (wholescan) {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass1, &info);
		if (info.error == 0) {
			vm_object_clear_flag(object,
					     OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY);
			if (object->type == OBJT_VNODE &&
			    (vp = (struct vnode *)object->handle) != NULL) {
				if (vp->v_flag & VOBJDIRTY)
					vclrflags(vp, VOBJDIRTY);
			}
		}
	}

	/*
	 * Do a pass to clean all the dirty pages we find.
	 */
	do {
		info.error = 0;
		generation = object->generation;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass2, &info);
	} while (info.error || generation != object->generation);

	vm_object_clear_flag(object, OBJ_CLEANING);
	vm_object_drop(object);
}
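/*
 * Example invocation (as used by vm_object_terminate() above):
 * synchronously flush every dirty page of a vnode object, relying on
 * the start == end "clean everything" semantics.
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 */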
/*
 * The caller must hold the object.
 */
static int
vm_object_page_clean_pass1(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	vm_page_flag_set(p, PG_CLEANCHK);
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		info->error = 1;
	} else if (vm_page_busy_try(p, FALSE) == 0) {
		vm_page_protect(p, VM_PROT_READ);	/* must not block */
		vm_page_wakeup(p);
	} else {
		info->error = 1;
	}
	return(0);
}
/*
 * The caller must hold the object
 */
static int
vm_object_page_clean_pass2(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int generation;

	/*
	 * Do not mess with pages that were inserted after we started
	 * the cleaning pass.
	 */
	if ((p->flags & PG_CLEANCHK) == 0)
		return(0);

	generation = info->object->generation;
	vm_page_busy_wait(p, TRUE, "vpcwai");
	if (p->object != info->object ||
	    info->object->generation != generation) {
		info->error = 1;
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * Before wasting time traversing the pmaps, check for trivial
	 * cases where the page cannot be dirty.
	 */
	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
		KKASSERT((p->dirty & p->valid) == 0 &&
			 (p->flags & PG_NEED_COMMIT) == 0);
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * Check whether the page is dirty or not.  The page has been set
	 * to be read-only so the check will not race a user dirtying the
	 * page.
	 */
	vm_page_test_dirty(p);
	if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were
	 * not cleared in this case (because pass1 will have returned an
	 * error), so we do not have to set them.
	 */
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
	 * the pages that get successfully flushed.  Set info->error if
	 * we raced an object modification.
	 */
	vm_object_page_collect_flush(info->object, p, info->pagerflags);
	return(0);
}
/*
 * Collect the specified page and nearby pages and flush them out.
 * The number of pages flushed is returned.  The passed page is busied
 * by the caller and we are responsible for its disposition.
 *
 * The caller must hold the object.
 */
static void
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
{
	int runlen;
	int error;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	pi = p->pindex;

	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		tp = vm_page_lookup_busy_try(object, pi + i, TRUE, &error);
		if (error)
			break;
		if (tp == NULL)
			break;
		if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
		    (tp->flags & PG_CLEANCHK) == 0) {
			vm_page_wakeup(tp);
			break;
		}
		if ((tp->queue - tp->pc) == PQ_CACHE) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		vm_page_test_dirty(tp);
		if ((tp->dirty & tp->valid) == 0 &&
		    (tp->flags & PG_NEED_COMMIT) == 0) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		maf[i - 1] = tp;
		maxf++;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	/*
	 * NOTE: chkb can be 0
	 */
	for (i = 1; chkb && i < chkb; i++) {
		vm_page_t tp;

		tp = vm_page_lookup_busy_try(object, pi - i, TRUE, &error);
		if (error)
			break;
		if (tp == NULL)
			break;
		if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
		    (tp->flags & PG_CLEANCHK) == 0) {
			vm_page_wakeup(tp);
			break;
		}
		if ((tp->queue - tp->pc) == PQ_CACHE) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		vm_page_test_dirty(tp);
		if ((tp->dirty & tp->valid) == 0 &&
		    (tp->flags & PG_NEED_COMMIT) == 0) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		mab[i - 1] = tp;
		maxb++;
	}

	/*
	 * All pages in the maf[] and mab[] array are busied.
	 */
	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	for (i = 0; i < runlen; i++)	/* XXX need this any more? */
		vm_page_hold(ma[i]);

	vm_pageout_flush(ma, runlen, pagerflags);

	for (i = 0; i < runlen; i++)	/* XXX need this any more? */
		vm_page_unhold(ma[i]);
}
/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 *	 vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	vm_object_hold(object);
	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
	vm_object_drop(object);
}
/*
 * Removes all physical pages in the specified object range from all
 * physical maps.
 *
 * The object must *not* be locked.
 */

static int vm_object_pmap_remove_callback(vm_page_t p, void *data);

void
vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	struct rb_vm_page_scan_info info;

	if (object == NULL)
		return;
	info.start_pindex = start;
	info.end_pindex = end - 1;

	vm_object_hold(object);
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
				vm_object_pmap_remove_callback, &info);
	if (start == 0 && end == object->size)
		vm_object_clear_flag(object, OBJ_WRITEABLE);
	vm_object_drop(object);
}

/*
 * The caller must hold the object
 */
static int
vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
{
	vm_page_protect(p, VM_PROT_NONE);
	return(0);
}
/*
 * Implements the madvise function at the object/page level.
 *
 * MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
 *
 * MADV_DONTNEED	(any object)
 *
 *	Deactivate the specified pages if they are resident.
 *
 * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
 *
 *	Deactivate and clean the specified pages if they are
 *	resident.  This permits the process to reuse the pages
 *	without faulting or the kernel to reclaim the pages
 *	without I/O.
 *
 * No requirements.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_object_t xobj;
	vm_page_t m;
	int error;

	if (object == NULL)
		return;

	end = pindex + count;

	vm_object_hold(object);
	tobject = object;

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		if (tobject != object)
			vm_object_drop(tobject);
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		m = vm_page_lookup_busy_try(tobject, tpindex, TRUE, &error);

		if (error) {
			vm_page_sleep_busy(m, TRUE, "madvpo");
			goto relookup;
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object
			 */
			while ((xobj = tobject->backing_object) != NULL) {
				KKASSERT(xobj != object);
				vm_object_hold(xobj);
				if (xobj == tobject->backing_object)
					break;
				vm_object_drop(xobj);
			}
			if (xobj == NULL)
				continue;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object) {
				vm_object_lock_swap();
				vm_object_drop(tobject);
			}
			tobject = xobj;
			goto shadowlookup;
		}

		/*
		 * If the page is not in a normal active state, we skip it.
		 * If the page is not managed there are no page queues to
		 * mess with.  Things can break if we mess with pages in
		 * any of the below states.
		 */
		if (m->wire_count ||
		    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			vm_page_wakeup(m);
			continue;
		}

		/*
		 * Theoretically once a page is known not to be busy, an
		 * interrupt cannot come along and rip it out from under us.
		 */
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
		vm_page_wakeup(m);
	}
	if (tobject != object)
		vm_object_drop(tobject);
	vm_object_drop(object);
}
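/*
 * Example invocations (hypothetical caller):
 *
 *	vm_object_madvise(obj, pindex, npages, MADV_WILLNEED);
 *	vm_object_madvise(obj, pindex, npages, MADV_FREE);
 *
 * Per the rules above, MADV_FREE only has an effect on OBJT_DEFAULT
 * or OBJT_SWAP pages whose object is OBJ_ONEMAPPING.
 */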
/*
 * Create a new object which is backed by the specified existing object
 * range.  Replace the pointer and offset that was pointing at the existing
 * object with the pointer/offset for the new object.
 *
 * No other requirements.
 */
void
vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
		 int addref)
{
	vm_object_t source;
	vm_object_t result;

	source = *objectp;

	/*
	 * Don't create the new object if the old object isn't shared.
	 * We have to chain wait before adding the reference to avoid
	 * racing a collapse or deallocation.
	 *
	 * Add the additional ref to source here to avoid racing a later
	 * collapse or deallocation. Clear the ONEMAPPING flag whether
	 * addref is TRUE or not in this case because the original object
	 * will be shadowed.
	 */
	if (source) {
		vm_object_hold(source);
		vm_object_chain_wait(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			vm_object_drop(source);
			if (addref) {
				vm_object_reference_locked(source);
				vm_object_clear_flag(source, OBJ_ONEMAPPING);
			}
			return;
		}
		vm_object_reference_locked(source);
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
	}

	/*
	 * Allocate a new object with the given length.  The new object
	 * is returned referenced but we may have to add another one.
	 * If we are adding a second reference we must clear OBJ_ONEMAPPING.
	 * (typically because the caller is about to clone a vm_map_entry).
	 *
	 * The source object currently has an extra reference to prevent
	 * collapses into it while we mess with its shadow list, which
	 * we will remove later in this routine.
	 */
	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");
	vm_object_hold(result);
	if (addref) {
		vm_object_reference_locked(result);
		vm_object_clear_flag(result, OBJ_ONEMAPPING);
	}

	/*
	 * The new object shadows the source object.  Chain wait before
	 * adjusting shadow_count or the shadow list to avoid races.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	KKASSERT(result->backing_object == NULL);
	result->backing_object = source;
	if (source) {
		vm_object_chain_wait(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		/* cpu localization twist */
		result->pg_color = (int)(intptr_t)curthread;
	}

	/*
	 * Adjust the return storage.  Drop the ref on source before
	 * returning.
	 */
	result->backing_object_offset = *offset;
	vm_object_drop(result);
	if (source) {
		vm_object_deallocate_locked(source);
		vm_object_drop(source);
	}

	/*
	 * Return the new things
	 */
	*offset = 0;
	*objectp = result;
}
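/*
 * Illustrative copy-on-write split (hypothetical caller): replace a
 * map entry's object/offset pair with a fresh shadow so that writes
 * no longer reach the original object.
 *
 *	vm_object_t obj = old_object;		- hypothetical locals
 *	vm_ooffset_t off = old_offset;
 *
 *	vm_object_shadow(&obj, &off, length, 0);
 *	- obj/off now name the new OBJT_DEFAULT shadow object
 */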
#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int vm_object_backing_scan_callback(vm_page_t p, void *data);
/*
 * The caller must hold the object.
 */
static int
vm_object_backing_scan(vm_object_t object, vm_object_t backing_object, int op)
{
	struct rb_vm_page_scan_info info;

	vm_object_assert_held(object);
	vm_object_assert_held(backing_object);

	KKASSERT(backing_object == object->backing_object);
	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT)
			return(0);
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
		vm_object_set_flag(backing_object, OBJ_DEAD);
		lwkt_gettoken(&vmobj_token);
		TAILQ_REMOVE(&vm_object_list, backing_object, object_list);
		vm_object_count--;
		lwkt_reltoken(&vmobj_token);
		vm_object_dead_wakeup(backing_object);
	}

	/*
	 * Our scan.  We have to retry if a negative error code is returned,
	 * otherwise 0 or 1 will be returned in info.error.  0 Indicates that
	 * the scan had to be stopped because the parent does not completely
	 * shadow the child.
	 */
	info.object = object;
	info.backing_object = backing_object;
	info.limit = op;
	do {
		info.error = 1;
		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
					vm_object_backing_scan_callback,
					&info);
	} while (info.error < 0);

	return(info.error);
}
/*
 * The caller must hold the object.
 */
static int
vm_object_backing_scan_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_object_t backing_object;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_pindex_t new_pindex;
	vm_pindex_t backing_offset_index;
	int op;

	pindex = p->pindex;
	new_pindex = pindex - info->backing_offset_index;
	op = info->limit;
	object = info->object;
	backing_object = info->backing_object;
	backing_offset_index = info->backing_offset_index;

	if (op & OBSC_TEST_ALL_SHADOWED) {
		vm_page_t pp;

		/*
		 * Ignore pages outside the parent object's range
		 * and outside the parent object's mapping of the
		 * backing object.
		 *
		 * note that we do not busy the backing object's
		 * page.
		 */
		if (pindex < backing_offset_index ||
		    new_pindex >= object->size
		) {
			return(0);
		}

		/*
		 * See if the parent has the page or if the parent's
		 * object pager has the page.  If the parent has the
		 * page but the page is not valid, the parent's
		 * object pager must have the page.
		 *
		 * If this fails, the parent does not completely shadow
		 * the object and we might as well give up now.
		 */
		pp = vm_page_lookup(object, new_pindex);
		if ((pp == NULL || pp->valid == 0) &&
		    !vm_pager_has_page(object, new_pindex)
		) {
			info->error = 0;	/* problemo */
			return(-1);		/* stop the scan */
		}
	}

	/*
	 * Check for busy page.  Note that we may have lost (p) when we
	 * possibly blocked above.
	 */
	if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
		vm_page_t pp;

		if (vm_page_busy_try(p, TRUE)) {
			if (op & OBSC_COLLAPSE_NOWAIT) {
				return(0);
			} else {
				/*
				 * If we slept, anything could have
				 * happened.   Ask that the scan be restarted.
				 *
				 * Since the object is marked dead, the
				 * backing offset should not have changed.
				 */
				vm_page_sleep_busy(p, TRUE, "vmocol");
				info->error = -1;
				return(-1);
			}
		}

		/*
		 * If (p) is no longer valid restart the scan.
		 */
		if (p->object != backing_object || p->pindex != pindex) {
			kprintf("vm_object_backing_scan: Warning: page "
				"%p ripped out from under us\n", p);
			vm_page_wakeup(p);
			info->error = -1;
			return(-1);
		}

		if (op & OBSC_COLLAPSE_NOWAIT) {
			if (p->valid == 0 ||
			    p->wire_count ||
			    (p->flags & PG_NEED_COMMIT)) {
				vm_page_wakeup(p);
				return(0);
			}
		} else {
			/* XXX what if p->valid == 0 , hold_count, etc? */
		}

		KASSERT(
			p->object == backing_object,
			("vm_object_qcollapse(): object mismatch")
		);

		/*
		 * Destroy any associated swap
		 */
		if (backing_object->type == OBJT_SWAP)
			swap_pager_freespace(backing_object, p->pindex, 1);

		if (
		    p->pindex < backing_offset_index ||
		    new_pindex >= object->size
		) {
			/*
			 * Page is out of the parent object's range, we
			 * can simply destroy it.
			 */
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
			return(0);
		}

		pp = vm_page_lookup(object, new_pindex);
		if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
			/*
			 * page already exists in parent OR swap exists
			 * for this location in the parent.  Destroy
			 * the original page from the backing object.
			 *
			 * Leave the parent's page alone
			 */
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
			return(0);
		}

		/*
		 * Page does not exist in parent, rename the
		 * page from the backing object to the main object.
		 *
		 * If the page was mapped to a process, it can remain
		 * mapped through the rename.
		 */
		if ((p->queue - p->pc) == PQ_CACHE)
			vm_page_deactivate(p);

		vm_page_rename(p, object, new_pindex);
		vm_page_wakeup(p);
		/* page automatically made dirty by rename */
	}
	return(0);
}
/*
 * This version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 *
 * The caller must hold the object and backing_object and both must be
 * chainlocked.
 *
 * (only called from vm_object_collapse)
 */
static void
vm_object_qcollapse(vm_object_t object, vm_object_t backing_object)
{
	if (backing_object->ref_count == 1) {
		backing_object->ref_count += 2;
		vm_object_backing_scan(object, backing_object,
				       OBSC_COLLAPSE_NOWAIT);
		backing_object->ref_count -= 2;
	}
}
/*
 * Collapse an object with the object backing it.  Pages in the backing
 * object are moved into the parent, and the backing object is deallocated.
 * Any conflict is resolved in favor of the parent's existing pages.
 *
 * object must be held and chain-locked on call.
 *
 * The caller must have an extra ref on object to prevent a race from
 * destroying it during the collapse.
 */
void
vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
{
	struct vm_object_dealloc_list *dlist = NULL;
	vm_object_t backing_object;

	/*
	 * Only one thread is attempting a collapse at any given moment.
	 * There are few restrictions for (object) that callers of this
	 * function check so reentrancy is likely.
	 */
	KKASSERT(object != NULL);
	vm_object_assert_held(object);
	KKASSERT(object->flags & OBJ_CHAINLOCK);

	for (;;) {
		vm_object_t bbobj;
		int dodealloc;

		/*
		 * We have to hold the backing object, check races.
		 */
		while ((backing_object = object->backing_object) != NULL) {
			vm_object_hold(backing_object);
			if (backing_object == object->backing_object)
				break;
			vm_object_drop(backing_object);
		}

		/*
		 * No backing object?  Nothing to collapse then.
		 */
		if (backing_object == NULL)
			break;

		/*
		 * You can't collapse with a non-default/non-swap object.
		 */
		if (backing_object->type != OBJT_DEFAULT &&
		    backing_object->type != OBJT_SWAP) {
			vm_object_drop(backing_object);
			backing_object = NULL;
			break;
		}

		/*
		 * Chain-lock the backing object too because if we
		 * successfully merge its pages into the top object we
		 * will collapse backing_object->backing_object as the
		 * new backing_object.  Re-check that it is still our
		 * backing object.
		 */
		vm_object_chain_acquire(backing_object);
		if (backing_object != object->backing_object) {
			vm_object_chain_release(backing_object);
			vm_object_drop(backing_object);
			continue;
		}

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			break;
		}

		/*
		 * If paging is in progress we can't do a normal collapse.
		 */
		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object, backing_object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			KKASSERT(object->backing_object == backing_object);
			vm_object_backing_scan(object, backing_object,
					       OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */
				vm_object_pip_add(object, 1);
				swap_pager_copy(backing_object, object,
				    OFF_TO_IDX(object->backing_object_offset),
				    TRUE);
				vm_object_pip_wakeup(object);
				vm_object_pip_wakeup(backing_object);
			}

			/*
			 * Object now shadows whatever backing_object did.
			 * Remove object from backing_object's shadow_list.
			 */
			LIST_REMOVE(object, shadow_list);
			KKASSERT(object->backing_object == backing_object);
			backing_object->shadow_count--;
			backing_object->generation++;

			/*
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			while ((bbobj = backing_object->backing_object) != NULL) {
				vm_object_hold(bbobj);
				if (bbobj == backing_object->backing_object)
					break;
				vm_object_drop(bbobj);
			}
			if (bbobj) {
				LIST_REMOVE(backing_object, shadow_list);
				bbobj->shadow_count--;
				bbobj->generation++;
				backing_object->backing_object = NULL;
			}
			object->backing_object = bbobj;
			if (bbobj) {
				LIST_INSERT_HEAD(&bbobj->shadow_head,
						 object, shadow_list);
				bbobj->shadow_count++;
				bbobj->generation++;
			}

			object->backing_object_offset +=
				backing_object->backing_object_offset;
			if (bbobj)
				vm_object_drop(bbobj);

			/*
			 * Discard the old backing_object.  Nothing should be
			 * able to ref it, other than a vm_map_split(),
			 * and vm_map_split() will stall on our chain lock.
			 * And we control the parent so it shouldn't be
			 * possible for it to go away either.
			 *
			 * Since the backing object has no pages, no pager
			 * left, and no object references within it, all
			 * that is necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1,
				("backing_object %p was somehow "
				 "re-referenced during collapse!",
				 backing_object));
			KASSERT(RB_EMPTY(&backing_object->rb_memq),
				("backing_object %p somehow has left "
				 "over pages during collapse!",
				 backing_object));

			/*
			 * The object can be destroyed.
			 *
			 * XXX just fall through and dodealloc instead
			 *     of forcing destruction?
			 */
			--backing_object->ref_count;
			if ((backing_object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(backing_object);
			object_collapses++;
			dodealloc = 0;
		} else {
			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, backing_object,
						OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * bbobj is backing_object->backing_object.  Since
			 * object completely shadows backing_object we can
			 * bypass it and become backed by bbobj instead.
			 */
			while ((bbobj = backing_object->backing_object) != NULL) {
				vm_object_hold(bbobj);
				if (bbobj == backing_object->backing_object)
					break;
				vm_object_drop(bbobj);
			}

			/*
			 * Make object shadow bbobj instead of backing_object.
			 * Remove object from backing_object's shadow list.
			 *
			 * Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			KKASSERT(object->backing_object == backing_object);
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			/*
			 * Add a ref to bbobj, bbobj now shadows object.
			 *
			 * NOTE: backing_object->backing_object still points
			 *	 to bbobj.  That relationship remains intact
			 *	 because backing_object has > 1 ref, so
			 *	 someone else is pointing to it (hence why
			 *	 we can't collapse it into object and can
			 *	 only handle the all-shadowed bypass case).
			 */
			if (bbobj) {
				vm_object_chain_wait(bbobj);
				vm_object_reference_locked(bbobj);
				LIST_INSERT_HEAD(&bbobj->shadow_head,
						 object, shadow_list);
				bbobj->shadow_count++;
				bbobj->generation++;
				object->backing_object_offset +=
					backing_object->backing_object_offset;
				object->backing_object = bbobj;
				vm_object_drop(bbobj);
			} else {
				object->backing_object = NULL;
			}

			/*
			 * Drop the reference count on backing_object.  To
			 * handle ref_count races properly we can't assume
			 * that the ref_count is still at least 2 so we
			 * have to actually call vm_object_deallocate()
			 * (after clearing the chainlock).
			 */
			object_bypasses++;
			dodealloc = 1;
		}

		/*
		 * Ok, we want to loop on the new object->bbobj association,
		 * possibly collapsing it further.  However if dodealloc is
		 * non-zero we have to deallocate the backing_object which
		 * itself can potentially undergo a collapse, creating a
		 * recursion depth issue with the LWKT token subsystem.
		 *
		 * In the case where we must deallocate the backing_object
		 * it is possible now that the backing_object has a single
		 * shadow count on some other object (not represented here
		 * as yet), since it no longer shadows us.  Thus when we
		 * call vm_object_deallocate() it may attempt to collapse
		 * itself into its remaining parent.
		 */
		if (dodealloc) {
			struct vm_object_dealloc_list *dtmp;

			vm_object_chain_release(backing_object);
			vm_object_unlock(backing_object);
			/* backing_object remains held */

			/*
			 * Auto-deallocation list for caller convenience.
			 */
			if (dlistp == NULL)
				dlistp = &dlist;

			dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK);
			dtmp->object = backing_object;
			dtmp->next = *dlistp;
			*dlistp = dtmp;
		} else {
			vm_object_chain_release(backing_object);
			vm_object_drop(backing_object);
		}
		/* backing_object = NULL; not needed */
		/* loop */
	}

	/*
	 * Clean up any left over backing_object
	 */
	if (backing_object) {
		vm_object_chain_release(backing_object);
		vm_object_drop(backing_object);
	}

	/*
	 * Clean up any auto-deallocation list.  This is a convenience
	 * for top-level callers so they don't have to pass &dlist.
	 * Do not clean up any caller-passed dlistp, the caller will
	 * do that.
	 */
	if (dlist)
		vm_object_deallocate_list(&dlist);
}
/*
 * vm_object_collapse() may collect additional objects in need of
 * deallocation.  This routine deallocates these objects.  The
 * deallocation itself can trigger additional collapses (which the
 * deallocate function takes care of).  This procedure is used to
 * reduce procedural recursion since these vm_object shadow chains
 * can become quite long.
 */
void
vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp)
{
	struct vm_object_dealloc_list *dlist;

	while ((dlist = *dlistp) != NULL) {
		*dlistp = dlist->next;
		vm_object_lock(dlist->object);
		vm_object_deallocate_locked(dlist->object);
		vm_object_drop(dlist->object);
		kfree(dlist, M_TEMP);
	}
}
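/*
 * Illustrative pairing (mirrors vm_object_deallocate_locked() above):
 * collect the objects a collapse wants deallocated, then retire them
 * iteratively instead of recursively.
 *
 *	struct vm_object_dealloc_list *dlist = NULL;
 *
 *	vm_object_collapse(object, &dlist);
 *	vm_object_deallocate_list(&dlist);
 */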
/*
 * Removes all physical pages in the specified object range from the
 * object's list of pages.
 *
 * No requirements.
 */
static int vm_object_page_remove_callback(vm_page_t p, void *data);

void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		      boolean_t clean_only)
{
	struct rb_vm_page_scan_info info;
	int all;

	/*
	 * Degenerate cases and assertions
	 */
	vm_object_hold(object);
	if (object == NULL ||
	    (object->resident_page_count == 0 && object->swblock_count == 0)) {
		vm_object_drop(object);
		return;
	}
	KASSERT(object->type != OBJT_PHYS,
		("attempt to remove pages from a physical object"));

	/*
	 * Indicate that paging is occurring on the object
	 */
	vm_object_pip_add(object, 1);

	/*
	 * Figure out the actual removal range and whether we are removing
	 * the entire contents of the object or not.  If removing the entire
	 * contents, be sure to get all pages, even those that might be
	 * beyond the end of the object.
	 */
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = (vm_pindex_t)-1;
	else
		info.end_pindex = end - 1;
	info.limit = clean_only;
	all = (start == 0 && info.end_pindex >= object->size - 1);

	/*
	 * Loop until we are sure we have gotten them all.
	 */
	do {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_remove_callback, &info);
	} while (info.error);

	/*
	 * Remove any related swap if throwing away pages, or for
	 * non-swap objects (the swap is a clean copy in that case).
	 */
	if (object->type != OBJT_SWAP || clean_only == FALSE) {
		if (all)
			swap_pager_freespace_all(object);
		else
			swap_pager_freespace(object, info.start_pindex,
			     info.end_pindex - info.start_pindex + 1);
	}

	/*
	 * Cleanup
	 */
	vm_object_pip_wakeup(object);
	vm_object_drop(object);
}
/*
 * The caller must hold the object.
 */
static int
vm_object_page_remove_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (vm_page_busy_try(p, TRUE)) {
		vm_page_sleep_busy(p, TRUE, "vmopar");
		info->error = 1;
		return(0);
	}

	/*
	 * Wired pages cannot be destroyed, but they can be invalidated
	 * and we do so if clean_only (limit) is not set.
	 *
	 * WARNING!  The page may be wired due to being part of a buffer
	 *	     cache buffer, and the buffer might be marked B_CACHE.
	 *	     This is fine as part of a truncation but VFSs must be
	 *	     sure to fix the buffer up when re-extending the file.
	 *
	 * NOTE!    PG_NEED_COMMIT is ignored.
	 */
	if (p->wire_count != 0) {
		vm_page_protect(p, VM_PROT_NONE);
		if (info->limit == 0)
			p->valid = 0;
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * limit is our clean_only flag.  If set and the page is dirty or
	 * requires a commit, do not free it.  If set and the page is being
	 * held by someone, do not free it.
	 */
	if (info->limit && p->valid) {
		vm_page_test_dirty(p);
		if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
			vm_page_wakeup(p);
			return(0);
		}
		if (p->hold_count) {
			vm_page_wakeup(p);
			return(0);
		}
	}

	/*
	 * Destroy the page
	 */
	vm_page_protect(p, VM_PROT_NONE);
	vm_page_free(p);
	return(0);
}
/*
 * Coalesces two objects backing up adjoining regions of memory into a
 * single object.
 *
 * returns TRUE if objects were combined.
 *
 * NOTE: Only works at the moment if the second object is NULL -
 *	 if it's not, which object do we lock first?
 *
 * Parameters:
 *	prev_object	First object to coalesce
 *	prev_pindex	Page index within prev_object where the region starts
 *	prev_size	Size of reference to prev_object (in bytes)
 *	next_size	Size of the region to coalesce in (in bytes)
 *
 * The caller does not need to hold (prev_object) but must have a stable
 * pointer to it (typically by holding the vm_map locked).
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
		   vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);

	vm_object_hold(prev_object);

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_chain_acquire(prev_object);
	vm_object_collapse(prev_object, NULL);

	/*
	 * We can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	vm_object_chain_release(prev_object);
	vm_object_drop(prev_object);

	return (TRUE);
}
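
/*
 * Illustrative sketch, not part of the original source and not compiled:
 * a vm_map-style caller merging a newly mapped range into the previous
 * entry's object via vm_object_coalesce().  All names other than
 * vm_object_coalesce() and OFF_TO_IDX() are hypothetical; sizes are passed
 * in bytes (the function converts to pages internally via PAGE_SHIFT).
 */
#if 0
	if (prev_entry->object.vm_object != NULL &&
	    vm_object_coalesce(prev_entry->object.vm_object,
			       OFF_TO_IDX(prev_entry->offset),
			       prev_entry->end - prev_entry->start,
			       new_size)) {
		/* reuse the previous object; just extend the entry */
		prev_entry->end += new_size;
	}
#endif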
/*
 * Make the object writable and flag it as possibly being dirty.
 *
 * The caller must hold the object.  XXX called from vm_page_dirty();
 * there is currently no requirement to hold the object.
 */
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	/*vm_object_assert_held(object);*/
	/*
	 * Avoid contention in vm fault path by checking the state before
	 * issuing an atomic op on it.
	 */
	if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
	    (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
		vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		if ((vp->v_flag & VOBJDIRTY) == 0) {
			vsetflags(vp, VOBJDIRTY);
		}
	}
}
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>
static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
				   vm_map_entry_t entry);
static int	vm_object_in_map (vm_object_t object);
/*
 * The caller must hold the object.
 */
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj, nobj;
	int entcount;

	if (map == 0)
		return 0;
	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if( _vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		return (0);
	}
	switch(entry->maptype) {
	case VM_MAPTYPE_SUBMAP:
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if( _vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		break;
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		obj = entry->object.vm_object;
		while (obj) {
			if (obj == object) {
				if (obj != entry->object.vm_object)
					vm_object_drop(obj);
				return 1;
			}

			/*
			 * The backing_object pointer can go stale; hold
			 * it and re-verify (see the sketch after this
			 * function).
			 */
			while ((nobj = obj->backing_object) != NULL) {
				vm_object_hold(nobj);
				if (nobj == obj->backing_object)
					break;
				vm_object_drop(nobj);
			}
			if (obj != entry->object.vm_object) {
				vm_object_lock_swap();
				vm_object_drop(obj);
			}
			obj = nobj;
		}
		break;
	default:
		break;
	}
	return 0;
}
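
/*
 * Illustrative sketch, not part of the original source and not compiled:
 * the hold-then-recheck idiom used in the chain walk above, in isolation.
 * The unlocked read of obj->backing_object can go stale while
 * vm_object_hold() blocks, so the pointer is re-verified after the hold
 * succeeds and the hold is retried if a race is detected.
 */
#if 0
	while ((nobj = obj->backing_object) != NULL) {
		vm_object_hold(nobj);		/* may block */
		if (nobj == obj->backing_object)
			break;			/* still the same object */
		vm_object_drop(nobj);		/* lost a race, retry */
	}
#endif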
static int vm_object_in_map_callback(struct proc *p, void *data);

struct vm_object_in_map_info {
	vm_object_t object;
	int rv;
};
static int
vm_object_in_map(vm_object_t object)
{
	struct vm_object_in_map_info info;

	info.rv = 0;
	info.object = object;

	allproc_scan(vm_object_in_map_callback, &info);
	if (info.rv)
		return 1;
	if( _vm_object_in_map(&kernel_map, object, 0))
		return 1;
	if( _vm_object_in_map(&pager_map, object, 0))
		return 1;
	if( _vm_object_in_map(&buffer_map, object, 0))
		return 1;
	return 0;
}
static int
vm_object_in_map_callback(struct proc *p, void *data)
{
	struct vm_object_in_map_info *info = data;

	if (p->p_vmspace) {
		if (_vm_object_in_map(&p->p_vmspace->vm_map,
				      info->object, 0)) {
			info->rv = 1;
			return -1;
		}
	}
	return (0);
}
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->type == OBJT_MARKER)
			continue;
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT ||
		     object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
					"vmochk: internal obj is not in a map: "
					"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
					object->ref_count, (u_long)object->size,
					(u_long)object->size,
					(void *)object->backing_object);
			}
		}
	}
}
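
/*
 * Usage note (added; the command name comes from the DB_SHOW_COMMAND macro
 * above, the session format is assumed): the check is invoked from the
 * kernel debugger prompt as
 *
 *	db> show vmochk
 *
 * Any internal object with a zero ref count or absent from all maps is
 * reported via db_printf().
 */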
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;
	vm_page_t p;
	int count;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	if (object == NULL)
		return;

	db_iprintf("Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
		   object, (int)object->type, (u_long)object->size,
		   object->resident_page_count,
		   object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
		   object->shadow_count,
		   object->backing_object ?
			object->backing_object->ref_count : 0,
		   object->backing_object,
		   (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
			  (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}
/* XXX. */
#undef count

/*
 * XXX need this non-static entry for calling from vm_map_print.
 */
void
vm_object_print(/* db_expr_t */ long addr,
		boolean_t have_addr,
		/* db_expr_t */ long count,
		char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_paddr_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		if (object->type == OBJT_MARKER)
			continue;
		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;

		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */