/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 * $DragonFly: src/sys/vm/vm_object.c,v 1.33 2008/05/09 07:24:48 dillon Exp $
 */
/*
 *	Virtual memory object module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#define EASY_SCAN_FACTOR	8

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
					     int pagerflags);
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 */
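/*
 * Example (illustrative sketch, not taken from this file): the lifetime
 * rules above reduce to allocating an object, taking one reference per
 * mapping, and dropping those references as the mappings go away.  The
 * functions named here are the real ones defined below; the calling
 * sequence itself is only a hypothetical caller.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(len));
 *	vm_object_reference(obj);	- a second mapping appears
 *	vm_object_deallocate(obj);	- that mapping goes away
 *	vm_object_deallocate(obj);	- last reference: storage released
 */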
struct object_q vm_object_list;
struct vm_object kernel_object;

static long vm_object_count;		/* count of all objects */
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
static int object_hash_rand;

#define VM_OBJECTS_INIT 256

static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
	int incr;

	RB_INIT(&object->rb_memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;

	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.  We 'wipe' new objects across the hash in 128 page
	 * increments plus 1 more to offset it a little more by the time
	 * it wraps around.
	 */
	object->hash_rand = object_hash_rand - 129;

	object->generation++;
	object->swblock_count = 0;
	RB_INIT(&object->swblock_root);

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	object_hash_rand = object->hash_rand;
}
/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);

	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
			    &kernel_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
		  vm_objects_init, VM_OBJECTS_INIT);
}
void
vm_object_init2(void)
{
	zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
}
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vref(object->handle);
		/* XXX what if the vnode is being destroyed? */
	}
}
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));

	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}

	object->ref_count--;
	if (object->ref_count == 0)
		vclrflags(vp, VTEXT);
	vrele(vp);
}
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
		} else if (object->ref_count > 2) {
			object->ref_count--;
			return;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 */
		if ((object->ref_count == 2) && (object->shadow_count == 0)) {
			vm_object_set_flag(object, OBJ_ONEMAPPING);
			object->ref_count--;
			return;
		} else if ((object->ref_count == 2) && (object->shadow_count == 1)) {
			/*
			 * If the only remaining reference is a shadow, try to
			 * collapse the object into it.
			 */
			if ((object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
					("vm_object_deallocate: ref_count: %d, shadow_count: %d",
					 object->ref_count,
					 object->shadow_count));
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					while (
						robject->paging_in_progress ||
						object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}
			object->ref_count--;
			return;
		}

		/*
		 * Normal dereferencing path
		 */
		object->ref_count--;
		if (object->ref_count != 0)
			return;

		/*
		 * Termination path
		 */
doterm:
		temp = object->backing_object;
		if (temp) {
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			object->backing_object = NULL;
		}

		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		object = temp;
	}
}
/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 * This routine may block.
 */
static int vm_object_terminate_callback(vm_page_t p, void *data);

void
vm_object_terminate(vm_object_t object)
{
	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * Wait for any I/O to complete, after which there had better not
	 * be any references left on the object.
	 */
	vm_object_pip_wait(object, "objtrm");

	if (object->ref_count != 0)
		panic("vm_object_terminate: object with references, ref_count=%d", object->ref_count);

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
				vm_object_terminate_callback, NULL);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 */
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	vm_object_dead_wakeup(object);

	if (object->ref_count != 0)
		panic("vm_object_terminate2: object with references, ref_count=%d", object->ref_count);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}
static int
vm_object_terminate_callback(vm_page_t p, void *data __unused)
{
	if (p->busy || (p->flags & PG_BUSY))
		panic("vm_object_terminate: freeing busy page %p", p);
	if (p->wire_count == 0) {
		vm_page_busy(p);
		vm_page_free(p);
		mycpu->gd_cnt.v_pfree++;
	} else {
		if (p->queue != PQ_NONE) {
			kprintf("vm_object_terminate: Warning: Encountered "
				"wired page %p on queue %d\n", p, p->queue);
		}
		vm_page_busy(p);
		vm_page_remove(p);
		vm_page_wakeup(p);
	}
	return(0);
}
/*
 * The object is dead but still has an object<->pager association.  Sleep
 * and return.  The caller typically retests the association in a loop.
 */
void
vm_object_dead_sleep(vm_object_t object, const char *wmesg)
{
	if (object->handle) {
		vm_object_set_flag(object, OBJ_DEADWNT);
		tsleep(object, 0, wmesg, 0);
	}
}
/*
 * Wakeup anyone waiting for the object<->pager disassociation on
 * a dead object.
 */
void
vm_object_dead_wakeup(vm_object_t object)
{
	if (object->flags & OBJ_DEADWNT) {
		vm_object_clear_flag(object, OBJ_DEADWNT);
		wakeup(object);
	}
}
/*
 * vm_object_page_clean
 *
 * Clean all dirty pages in the specified range of object.  Leaves page
 * on whatever queue it is currently on.   If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * When stuffing pages asynchronously, allow clustering.  XXX we need a
 * synchronous clustering mode implementation.
 *
 * Odd semantics: if start == end, we clean everything.
 */
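/*
 * Example (sketch of hypothetical callers): because of the start == end
 * convention, a synchronous flush of an entire vnode object is simply
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *
 * which is how vm_object_terminate() uses it above.  An asynchronous flush
 * of only the range [base, base + count) would instead be
 *
 *	vm_object_page_clean(object, base, base + count, 0);
 */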
static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
static int vm_object_page_clean_pass2(struct vm_page *p, void *data);

void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		     int flags)
{
	struct rb_vm_page_scan_info info;
	struct vnode *vp;
	int wholescan;
	int pagerflags;
	int curgeneration;

	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	/*
	 * Interlock other major object operations.  This allows us to
	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
	 */
	vm_object_set_flag(object, OBJ_CLEANING);

	/*
	 * Handle 'entire object' case
	 */
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = object->size - 1;
	else
		info.end_pindex = end - 1;
	wholescan = (start == 0 && info.end_pindex == object->size - 1);
	info.limit = flags;
	info.pagerflags = pagerflags;
	info.object = object;

	/*
	 * If cleaning the entire object do a pass to mark the pages read-only.
	 * If everything worked out ok, clear OBJ_WRITEABLE and
	 * OBJ_MIGHTBEDIRTY.
	 */
	if (wholescan) {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass1, &info);
		if (info.error == 0) {
			vm_object_clear_flag(object,
					     OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
			if (object->type == OBJT_VNODE &&
			    (vp = (struct vnode *)object->handle) != NULL) {
				if (vp->v_flag & VOBJDIRTY)
					vclrflags(vp, VOBJDIRTY);
			}
		}
	}

	/*
	 * Do a pass to clean all the dirty pages we find.
	 */
	do {
		info.error = 0;
		curgeneration = object->generation;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass2, &info);
	} while (info.error || curgeneration != object->generation);

	vm_object_clear_flag(object, OBJ_CLEANING);
}
static int
vm_object_page_clean_pass1(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	vm_page_flag_set(p, PG_CLEANCHK);
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
		info->error = 1;
	else
		vm_page_protect(p, VM_PROT_READ);	/* must not block */
	return(0);
}
static int
vm_object_page_clean_pass2(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int n;

	/*
	 * Do not mess with pages that were inserted after we started
	 * the cleaning pass.
	 */
	if ((p->flags & PG_CLEANCHK) == 0)
		return(0);

	/*
	 * Before wasting time traversing the pmaps, check for trivial
	 * cases where the page cannot be dirty.
	 */
	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
		KKASSERT((p->dirty & p->valid) == 0);
		return(0);
	}

	/*
	 * Check whether the page is dirty or not.  The page has been set
	 * to be read-only so the check will not race a user dirtying the
	 * page.
	 */
	vm_page_test_dirty(p);
	if ((p->dirty & p->valid) == 0) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		return(0);
	}

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were
	 * not cleared in this case (because pass1 will have returned an
	 * error), so we do not have to set them.
	 */
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		return(0);
	}

	/*
	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
	 * the pages that get successfully flushed.  Set info->error if
	 * we raced an object modification.
	 */
	n = vm_object_page_collect_flush(info->object, p, info->pagerflags);
	if (n == 0)
		info->error = 1;
	return(0);
}
/*
 * This routine must be called within a critical section to properly avoid
 * an interrupt unbusy/free race that can occur prior to the busy check.
 *
 * Using the object generation number here to detect page ripout is not
 * the best idea in the world. XXX
 *
 * NOTE: we operate under the assumption that a page found to not be busy
 * will not be ripped out from under us by an interrupt.  XXX we should
 * recode this to explicitly busy the pages.
 */
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	int curgeneration;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	curgeneration = object->generation;

	pi = p->pindex;
	while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
		if (object->generation != curgeneration) {
			return(0);
		}
	}
	KKASSERT(p->object == object && p->pindex == pi);

	maxf = 0;
	for(i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->flags & PG_BUSY) ||
			    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
			     (tp->flags & PG_CLEANCHK) == 0) ||
			    (tp->busy != 0))
				break;
			if((tp->queue - tp->pc) == PQ_CACHE) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for(i = 1; i < chkb;i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				     (tp->flags & PG_CLEANCHK) == 0) ||
				    (tp->busy != 0))
					break;
				if((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for(i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for(i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			vm_page_protect(ma[i], VM_PROT_READ);
			vm_page_flag_set(ma[i], PG_CLEANCHK);

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return(maxf + 1);
}
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static int vm_object_deactivate_pages_callback(vm_page_t p, void *data);

static void
vm_object_deactivate_pages(vm_object_t object)
{
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
				vm_object_deactivate_pages_callback, NULL);
}

static int
vm_object_deactivate_pages_callback(vm_page_t p, void *data __unused)
{
	vm_page_deactivate(p);
	return(0);
}
/*
 *	Same as vm_object_pmap_copy, except range checking really
 *	works, and is meant for small sections of an object.
 *
 *	This code protects resident pages by making them read-only
 *	and is typically called on a fork or split when a page
 *	is converted to copy-on-write.
 *
 *	NOTE: If the page is already at VM_PROT_NONE, calling
 *	vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	/*
	 * spl protection needed to prevent races between the lookup,
	 * an interrupt unbusy/free, and our protect call.
	 */
	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
}
/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */

static int vm_object_pmap_remove_callback(vm_page_t p, void *data);

void
vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	struct rb_vm_page_scan_info info;

	if (object == NULL)
		return;
	info.start_pindex = start;
	info.end_pindex = end - 1;
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
				vm_object_pmap_remove_callback, &info);
	if (start == 0 && end == object->size)
		vm_object_clear_flag(object, OBJ_WRITEABLE);
}

static int
vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
{
	vm_page_protect(p, VM_PROT_NONE);
	return(0);
}
/*
 * Implements the madvise function at the object/page level.
 *
 * MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
 *
 * MADV_DONTNEED	(any object)
 *
 *	Deactivate the specified pages if they are resident.
 *
 * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *		 OBJ_ONEMAPPING only)
 *
 *	Deactivate and clean the specified pages if they are
 *	resident.  This permits the process to reuse the pages
 *	without faulting or the kernel to reclaim the pages
 *	without I/O.
 */
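/*
 * Example (sketch of a hypothetical caller): the madvise(2) path resolves a
 * user address range to an object and a starting page index and then calls,
 * e.g. for MADV_FREE over 16 pages,
 *
 *	vm_object_madvise(object, pindex, 16, MADV_FREE);
 *
 * Per the rules above, only OBJT_DEFAULT/OBJT_SWAP objects with
 * OBJ_ONEMAPPING set will actually have their pages cleaned and any
 * associated swap freed.
 */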
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		/*
		 * spl protection is required to avoid a race between the
		 * lookup, an interrupt unbusy/free, and our busy check.
		 */
		m = vm_page_lookup(tobject, tpindex);

		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object
			 */
			if (tobject->backing_object == NULL)
				continue;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			tobject = tobject->backing_object;
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		if (
		    m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			continue;
		}

		if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
			goto relookup;
		}

		/*
		 * Theoretically once a page is known not to be busy, an
		 * interrupt cannot come along and rip it out from under us.
		 */

		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
	}
}
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
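/*
 * Example (sketch of a hypothetical copy-on-write caller): the object
 * pointer and offset are passed by reference and come back pointing at the
 * new shadow object with the offset rebased into it.  old_object, old_offset
 * and size are hypothetical locals.
 *
 *	vm_object_t obj = old_object;
 *	vm_ooffset_t off = old_offset;
 *
 *	vm_object_shadow(&obj, &off, OFF_TO_IDX(size));
 *	- obj is now a new OBJT_DEFAULT object backed by old_object
 */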
void
vm_object_shadow(vm_object_t *object,	/* IN/OUT */
		 vm_ooffset_t *offset,	/* IN/OUT */
		 vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL &&
	    source->ref_count == 1 &&
	    source->handle == NULL &&
	    (source->type == OBJT_DEFAULT ||
	     source->type == OBJT_SWAP))
		return;

	/*
	 * Allocate a new object with the given length
	 */
	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source) {
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}
#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int vm_object_backing_scan_callback(vm_page_t p, void *data);
static int
vm_object_backing_scan(vm_object_t object, int op)
{
	struct rb_vm_page_scan_info info;
	vm_object_t backing_object;

	/*
	 * spl protection is required to avoid races between the memq/lookup,
	 * an interrupt doing an unbusy/free, and our busy check.  Among
	 * other things.
	 */
	backing_object = object->backing_object;
	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT)
			return(0);
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan.   We have to retry if a negative error code is returned,
	 * otherwise 0 or 1 will be returned in info.error.  0 Indicates that
	 * the scan had to be stopped because the parent does not completely
	 * shadow the child.
	 */
	info.object = object;
	info.backing_object = backing_object;
	info.limit = op;
	do {
		info.error = 1;
		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
					vm_object_backing_scan_callback,
					&info);
	} while (info.error < 0);
	return(info.error);
}
static int
vm_object_backing_scan_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_object_t backing_object;
	vm_object_t object;
	vm_pindex_t new_pindex;
	vm_pindex_t backing_offset_index;
	int op;

	new_pindex = p->pindex - info->backing_offset_index;

	op = info->limit;
	object = info->object;
	backing_object = info->backing_object;
	backing_offset_index = info->backing_offset_index;

	if (op & OBSC_TEST_ALL_SHADOWED) {
		vm_page_t pp;

		/*
		 * Ignore pages outside the parent object's range
		 * and outside the parent object's mapping of the
		 * backing object.
		 *
		 * note that we do not busy the backing object's
		 * page.
		 */
		if (
		    p->pindex < backing_offset_index ||
		    new_pindex >= object->size
		) {
			return(0);
		}

		/*
		 * See if the parent has the page or if the parent's
		 * object pager has the page.  If the parent has the
		 * page but the page is not valid, the parent's
		 * object pager must have the page.
		 *
		 * If this fails, the parent does not completely shadow
		 * the object and we might as well give up now.
		 */
		pp = vm_page_lookup(object, new_pindex);
		if ((pp == NULL || pp->valid == 0) &&
		    !vm_pager_has_page(object, new_pindex)
		) {
			info->error = 0;	/* problemo */
			return(-1);		/* stop the scan */
		}
	}

	/*
	 * Check for busy page
	 */
	if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
		vm_page_t pp;

		if (op & OBSC_COLLAPSE_NOWAIT) {
			if (
			    (p->flags & PG_BUSY) ||
			    !p->valid ||
			    p->hold_count ||
			    p->wire_count ||
			    p->busy
			) {
				return(0);
			}
		} else if (op & OBSC_COLLAPSE_WAIT) {
			if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
				/*
				 * If we slept, anything could have
				 * happened.   Ask that the scan be restarted.
				 *
				 * Since the object is marked dead, the
				 * backing offset should not have changed.
				 */
				info->error = -1;
				return(-1);
			}
		}

		/*
		 * Busy the page
		 */
		vm_page_busy(p);

		KASSERT(
		    p->object == backing_object,
		    ("vm_object_qcollapse(): object mismatch")
		);

		/*
		 * Destroy any associated swap
		 */
		if (backing_object->type == OBJT_SWAP)
			swap_pager_freespace(backing_object, p->pindex, 1);

		if (
		    p->pindex < backing_offset_index ||
		    new_pindex >= object->size
		) {
			/*
			 * Page is out of the parent object's range, we
			 * can simply destroy it.
			 */
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
			return(0);
		}

		pp = vm_page_lookup(object, new_pindex);
		if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
			/*
			 * page already exists in parent OR swap exists
			 * for this location in the parent.  Destroy
			 * the original page from the backing object.
			 *
			 * Leave the parent's page alone
			 */
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
			return(0);
		}

		/*
		 * Page does not exist in parent, rename the
		 * page from the backing object to the main object.
		 *
		 * If the page was mapped to a process, it can remain
		 * mapped through the rename.
		 */
		if ((p->queue - p->pc) == PQ_CACHE)
			vm_page_deactivate(p);

		vm_page_rename(p, object, new_pindex);
		/* page automatically made dirty by rename */
	}
	return(0);
}
/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
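/*
 * Example (sketch): after a fork the child's anonymous object is often the
 * sole remaining shadow of its backing object.  Callers such as
 * vm_object_deallocate() above simply invoke
 *
 *	vm_object_collapse(object);
 *
 * opportunistically; when backing_object->ref_count == 1 its pages (and any
 * swap) are migrated into object, otherwise the bypass path below may still
 * splice the intermediate object out of the shadow chain.
 */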
void
vm_object_collapse(vm_object_t object)
{
	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if (object == NULL)
			break;

		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */
				vm_object_pip_add(object, 1);
				swap_pager_copy(backing_object, object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
				vm_object_pip_wakeup(object);

				vm_object_pip_wakeup(backing_object);
			}

			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			object->backing_object->shadow_count--;
			object->backing_object->generation++;
			if (backing_object->backing_object) {
				LIST_REMOVE(backing_object, shadow_list);
				backing_object->backing_object->shadow_count--;
				backing_object->backing_object->generation++;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				LIST_INSERT_HEAD(
				    &object->backing_object->shadow_head,
				    object,
				    shadow_list
				);
				object->backing_object->shadow_count++;
				object->backing_object->generation++;
			}

			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
			KASSERT(RB_EMPTY(&backing_object->rb_memq), ("backing_object %p somehow has left over pages during collapse!", backing_object));

			TAILQ_REMOVE(&vm_object_list, backing_object,
				     object_list);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate, but
			 * we do anyway.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}
/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 */
static int vm_object_page_remove_callback(vm_page_t p, void *data);
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		      boolean_t clean_only)
{
	struct rb_vm_page_scan_info info;
	int all;

	/*
	 * Degenerate cases and assertions
	 */
	if (object == NULL ||
	    (object->resident_page_count == 0 && object->swblock_count == 0)) {
		return;
	}
	KASSERT(object->type != OBJT_PHYS,
		("attempt to remove pages from a physical object"));

	/*
	 * Indicate that paging is occurring on the object
	 */
	vm_object_pip_add(object, 1);

	/*
	 * Figure out the actual removal range and whether we are removing
	 * the entire contents of the object or not.  If removing the entire
	 * contents, be sure to get all pages, even those that might be
	 * beyond the end of the object.
	 */
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = (vm_pindex_t)-1;
	else
		info.end_pindex = end - 1;
	info.limit = clean_only;
	all = (start == 0 && info.end_pindex >= object->size - 1);

	/*
	 * Loop until we are sure we have gotten them all.
	 */
	do {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_remove_callback, &info);
	} while (info.error);

	/*
	 * Remove any related swap if throwing away pages, or for
	 * non-swap objects (the swap is a clean copy in that case).
	 */
	if (object->type != OBJT_SWAP || clean_only == FALSE) {
		if (all)
			swap_pager_freespace_all(object);
		else
			swap_pager_freespace(object, info.start_pindex,
			     info.end_pindex - info.start_pindex + 1);
	}

	/*
	 * Cleanup
	 */
	vm_object_pip_wakeup(object);
}
static int
vm_object_page_remove_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	/*
	 * Wired pages cannot be destroyed, but they can be invalidated
	 * and we do so if clean_only (limit) is not set.
	 *
	 * WARNING!  The page may be wired due to being part of a buffer
	 *	     cache buffer, and the buffer might be marked B_CACHE.
	 *	     This is fine as part of a truncation but VFSs must be
	 *	     sure to fix the buffer up when re-extending the file.
	 */
	if (p->wire_count != 0) {
		vm_page_protect(p, VM_PROT_NONE);
		if (info->limit == 0)
			p->valid = 0;
		return(0);
	}

	/*
	 * The busy flags are only cleared at
	 * interrupt -- minimize the spl transitions
	 */
	if (vm_page_sleep_busy(p, TRUE, "vmopar")) {
		info->error = 1;
		return(0);
	}

	/*
	 * limit is our clean_only flag.  If set and the page is dirty, do
	 * not free it.  If set and the page is being held by someone, do
	 * not free it.
	 */
	if (info->limit && p->valid) {
		vm_page_test_dirty(p);
		if (p->valid & p->dirty)
			return(0);
	}

	/*
	 * Destroy the page
	 */
	vm_page_busy(p);
	vm_page_protect(p, VM_PROT_NONE);
	vm_page_free(p);
	return(0);
}
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object into coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *		The object must *not* be locked.
 */
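/*
 * Example (sketch of a hypothetical caller): when the map code extends an
 * anonymous mapping by next_size bytes immediately after an existing
 * prev_size-byte reference into prev_object, it may try
 *
 *	if (vm_object_coalesce(prev_object, prev_pindex,
 *			       prev_size, next_size))
 *		- reuse prev_object for the new range
 *
 * instead of allocating a second object; on success prev_object->size is
 * grown to cover the combined range.
 */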
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
		   vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if: . more than one reference . paged out . shadows
	 * another object . has a copy elsewhere (any of which mean that the
	 * pages not mapped to prev_entry may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	return (TRUE);
}
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		if ((vp->v_flag & VOBJDIRTY) == 0) {
			vsetflags(vp, VOBJDIRTY);
		}
	}
}
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>
static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
				   vm_map_entry_t entry);
static int	vm_object_in_map (vm_object_t object);
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == NULL)
		return 0;
	if (entry == NULL) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if( _vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		return (0);
	}
	switch(entry->maptype) {
	case VM_MAPTYPE_SUBMAP:
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if( _vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		break;
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		obj = entry->object.vm_object;
		while (obj) {
			if (obj == object)
				return 1;
			obj = obj->backing_object;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int vm_object_in_map_callback(struct proc *p, void *data);

struct vm_object_in_map_info {
	vm_object_t object;
	int rv;
};

static int
vm_object_in_map(vm_object_t object)
{
	struct vm_object_in_map_info info;

	info.rv = 0;
	info.object = object;

	allproc_scan(vm_object_in_map_callback, &info);
	if (info.rv)
		return 1;
	if( _vm_object_in_map(&kernel_map, object, 0))
		return 1;
	if( _vm_object_in_map(&pager_map, object, 0))
		return 1;
	if( _vm_object_in_map(&buffer_map, object, 0))
		return 1;
	return 0;
}

static int
vm_object_in_map_callback(struct proc *p, void *data)
{
	struct vm_object_in_map_info *info = data;

	if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
		info->rv = 1;
		return(-1);
	}
	return(0);
}
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->type == OBJT_MARKER)
			continue;
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}
/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;
	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(/* db_expr_t */ long addr,
		boolean_t have_addr,
		/* db_expr_t */ long count,
		char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;

	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_paddr_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		if (object->type == OBJT_MARKER)
			continue;
		db_printf("new object: %p\n", (void *)object);

		rcount = 0;
		fidx = 0;
		osize = object->size;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
		}
	}
}

#endif /* DDB */