4 * cc -I/usr/src/sys vmpageinfo.c -o ~/bin/vmpageinfo -lkvm
8 * Validate the vm_page_buckets[] hash array against the vm_page_array
11 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
13 * This code is derived from software contributed to The DragonFly Project
14 * by Matthew Dillon <dillon@backplane.com>
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in
24 * the documentation and/or other materials provided with the
26 * 3. Neither the name of The DragonFly Project nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific, prior written permission.
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44 #define _KERNEL_STRUCTURES_
45 #include <sys/param.h>
48 #include <sys/malloc.h>
49 #include <sys/signalvar.h>
50 #include <sys/vnode.h>
51 #include <sys/namecache.h>
52 #include <sys/slaballoc.h>
55 #include <vm/vm_page.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_object.h>
58 #include <vm/swap_pager.h>
59 #include <vm/vnode_pager.h>
71 { "_vm_page_array_size" },
/*
 * Kernel state mirrored into this process.  The addresses/values are
 * resolved via kvm_nlist()/kkread() in main(); vm_page_array and
 * kernel_object_ptr hold *kernel* addresses, not local pointers you
 * can dereference directly.
 */
struct vm_page **vm_page_buckets;	/* kernel page hash table (kernel addr) */
int vm_page_hash_mask;			/* bucket index mask; NOTE(review): load elided from this extract — confirm it is kkread() early in main() */
struct vm_page *vm_page_array;		/* kernel address of the page array */
struct vm_object *kernel_object_ptr;	/* kernel address of kernel_object */
int vm_page_array_size;			/* number of entries in vm_page_array */
97 void checkpage(kvm_t
*kd
, vm_page_t mptr
, vm_page_t m
, struct vm_object
*obj
);
98 static void kkread_vmpage(kvm_t
*kd
, u_long addr
, vm_page_t m
);
99 static void kkread(kvm_t
*kd
, u_long addr
, void *buf
, size_t nbytes
);
100 static int kkread_err(kvm_t
*kd
, u_long addr
, void *buf
, size_t nbytes
);
103 static void addsltrack(vm_page_t m
);
104 static void dumpsltrack(kvm_t
*kd
);
106 static int unique_object(void *ptr
);
/*
 * Page accounting tallies, in pages; printed by main() as MB assuming
 * 4KiB pages (count * 4096.0 / 1048576.0).  Zero-initialized by C
 * static-storage rules.  NOTE(review): further counters (count_anon,
 * count_vnode, count_device, ...) are declared on lines elided from
 * this extract.
 */
long count_wired;		/* total wired pages */
long count_wired_vnode;		/* wired, vnode-backed (buffer cache) */
long count_wired_anon;		/* wired, anonymous-object backed */
long count_wired_in_pmap;	/* wired and present in a pmap */
long count_wired_pgtable;	/* wired page-table pages (no object) */
long count_wired_other;		/* wired, no object, not classified */
long count_wired_kernel;	/* wired pages of kernel_object */
long count_wired_obj_other;	/* wired, object of unknown type */
long count_anon_in_pmap;	/* anonymous pages present in a pmap */
long count_noobj_offqueue;	/* objectless pages on no queue */
long count_noobj_onqueue;	/* objectless pages on a non-free queue */
/*
 * main() - vmpageinfo entry point: open the kernel (or a crash dump)
 * via libkvm, resolve the symbols in Nl[], then (1) scan vm_page_array
 * tallying page usage and (2) scan vm_page_buckets[] validating the
 * hash chains.
 *
 * NOTE(review): this chunk is a partial extraction; many original
 * source lines are elided, so statements below appear fragmented.
 * All original tokens are preserved verbatim; only comments added.
 */
129 main(int ac
, char **av
)
/* core and system files default to NULL => live kernel via kvm_open() */
131 const char *corefile
= NULL
;
132 const char *sysfile
= NULL
;
134 struct vm_object obj
;
/* parse -M core, -N system, -d (debug), -v (verbose) */
145 while ((ch
= getopt(ac
, av
, "M:N:dv")) != -1) {
/* usage message on a bad option */
160 fprintf(stderr
, "%s [-M core] [-N system]\n", av
[0]);
/* open kernel memory read-only; "kvm:" prefixes error messages */
167 if ((kd
= kvm_open(sysfile
, corefile
, NULL
, O_RDONLY
, "kvm:")) == NULL
) {
/* resolve all the kernel symbols listed in Nl[] at once */
171 if (kvm_nlist(kd
, Nl
) != 0) {
/* copy the page-array pointer and its element count out of the kernel */
176 kkread(kd
, Nl
[0].n_value
, &vm_page_array
, sizeof(vm_page_array
));
177 kkread(kd
, Nl
[1].n_value
, &vm_page_array_size
, sizeof(vm_page_array_size
));
/* kernel_object's address is the symbol value itself, not a read */
178 kernel_object_ptr
= (void *)Nl
[2].n_value
;
/* buffer counts used later to estimate struct buf memory */
179 kkread(kd
, Nl
[3].n_value
, &nbuf
, sizeof(nbuf
));
180 kkread(kd
, Nl
[4].n_value
, &nswbuf_mem
, sizeof(nswbuf_mem
));
181 kkread(kd
, Nl
[5].n_value
, &nswbuf_kva
, sizeof(nswbuf_kva
));
182 kkread(kd
, Nl
[6].n_value
, &nswbuf_raw
, sizeof(nswbuf_raw
));
/* kernel image size = difference of two bracketing symbols;
 * presumably Nl[7]/Nl[8] are start/end of the kernel — TODO confirm */
183 kern_size
= Nl
[8].n_value
- Nl
[7].n_value
;
186 * Scan the vm_page_array validating all pages with associated objects
188 for (i
= 0; i
< vm_page_array_size
; ++i
) {
/* progress indicator (carriage return, no newline) */
190 printf("page %d\r", i
);
/* cached read of the i'th struct vm_page (see kkread_vmpage) */
193 kkread_vmpage(kd
, (u_long
)&vm_page_array
[i
], &m
);
/* pull in the page's vm_object and cross-check the hash bucket */
195 kkread(kd
, (u_long
)m
.object
, &obj
, sizeof(obj
));
196 checkpage(kd
, &vm_page_array
[i
], &m
, &obj
);
/* classify by page queue (compare order matters: HOLD > CACHE > ...) */
198 if (m
.queue
>= PQ_HOLD
) {
200 } else if (m
.queue
>= PQ_CACHE
) {
202 } else if (m
.queue
>= PQ_ACTIVE
) {
204 } else if (m
.queue
>= PQ_INACTIVE
) {
206 } else if (m
.queue
>= PQ_FREE
) {
/* wired-page accounting: objectless + mapped/writeable/unqueued
 * is treated as a page-table page */
214 if (m
.object
== NULL
) {
215 if ((m
.flags
& PG_MAPPED
) &&
216 (m
.flags
& PG_WRITEABLE
) &&
217 (m
.flags
& PG_UNQUEUED
)) {
218 ++count_wired_pgtable
;
222 } else if (m
.object
== kernel_object_ptr
) {
223 ++count_wired_kernel
;
233 ++count_wired_in_pmap;
239 ++count_wired_obj_other
;
/* anonymous pages currently entered in a pmap (non-kernel object) */
245 if (m
.md
.pmap_count
) {
246 if (m
.object
&& m
.object
!= kernel_object_ptr
) {
250 ++count_anon_in_pmap
;
/* verbose per-page dump (format string continues across lines) */
260 printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
261 "wire=%-2d act=%-3d busy=%d w/pmapcnt=%d/%d %8s",
265 (intmax_t)m
.pindex
* PAGE_SIZE
,
/* md counters intentionally printed as 0 (fields commented out) */
272 0 /* m.md.writeable_count */,
273 0 /*m.md.pmap_count*/,
/* per-object-type totals; unique_object() ensures each object's
 * resident_page_count is added only once */
278 if (m
.object
== kernel_object_ptr
) {
280 if (unique_object(m
.object
))
281 count_kernel
+= obj
.resident_page_count
;
282 } else if (m
.object
) {
286 if (unique_object(m
.object
))
287 count_anon
+= obj
.resident_page_count
;
291 if (unique_object(m
.object
))
292 count_anon
+= obj
.resident_page_count
;
296 if (unique_object(m
.object
))
297 count_vnode
+= obj
.resident_page_count
;
301 if (unique_object(m
.object
))
302 count_device
+= obj
.resident_page_count
;
306 if (unique_object(m
.object
))
307 count_phys
+= obj
.resident_page_count
;
311 if (unique_object(m
.object
))
312 count_unknown
+= obj
.resident_page_count
;
315 if (unique_object(m
.object
))
316 count_unknown
+= obj
.resident_page_count
;
/* objectless pages: split by whether they sit on a queue at all */
322 if (m
.queue
== PQ_NONE
)
323 ++count_noobj_offqueue
;
324 else if (m
.queue
- m
.pc
!= PQ_FREE
)
325 ++count_noobj_onqueue
;
/* verbose flag decoding for the per-page dump */
329 printf(" %-7s", ostr
);
330 if (m
.busy_count
& PBUSY_LOCKED
)
332 if (m
.busy_count
& PBUSY_WANTED
)
334 if (m
.flags
& PG_WINATCFLS
)
335 printf(" WINATCFLS");
336 if (m
.flags
& PG_FICTITIOUS
)
337 printf(" FICTITIOUS");
338 if (m
.flags
& PG_WRITEABLE
)
339 printf(" WRITEABLE");
340 if (m
.flags
& PG_MAPPED
)
342 if (m
.flags
& PG_NEED_COMMIT
)
343 printf(" NEED_COMMIT");
344 if (m
.flags
& PG_REFERENCED
)
345 printf(" REFERENCED");
346 if (m
.flags
& PG_CLEANCHK
)
348 if (m
.busy_count
& PBUSY_SWAPINPROG
)
349 printf(" SWAPINPROG");
350 if (m
.flags
& PG_NOSYNC
)
352 if (m
.flags
& PG_UNQUEUED
)
354 if (m
.flags
& PG_MARKER
)
356 if (m
.flags
& PG_RAM
)
358 if (m
.flags
& PG_SWAPPED
)
361 if (m
.flags
& PG_SLAB
)
366 if (m
.flags
& PG_SLAB
)
/* summary report: counts are pages, printed as MB (4KiB pages) */
371 if (debugopt
|| verboseopt
)
373 printf("%8.2fM free\n", count_free
* 4096.0 / 1048576.0);
375 printf("%8.2fM wired vnode (in buffer cache)\n",
376 count_wired_vnode
* 4096.0 / 1048576.0);
377 printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
378 count_wired_in_pmap
* 4096.0 / 1048576.0);
379 printf("%8.2fM wired pgtable\n",
380 count_wired_pgtable
* 4096.0 / 1048576.0);
381 printf("%8.2fM wired anon\n",
382 count_wired_anon
* 4096.0 / 1048576.0);
383 printf("%8.2fM wired kernel_object\n",
384 count_wired_kernel
* 4096.0 / 1048576.0);
/* fixed kernel overheads itemized under the wired totals */
386 printf("\t%8.2fM vm_page_array\n",
387 vm_page_array_size
* sizeof(struct vm_page
) / 1048576.0);
388 printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
389 (nbuf
+ nswbuf_mem
+ nswbuf_kva
+ nswbuf_raw
) *
390 sizeof(struct buf
) / 1048576.0);
391 printf("\t%8.2fM kernel binary\n", kern_size
/ 1048576.0);
392 printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");
394 printf("%8.2fM wired other (unknown object)\n",
395 count_wired_obj_other
* 4096.0 / 1048576.0);
396 printf("%8.2fM wired other (no object, probably kernel)\n",
397 count_wired_other
* 4096.0 / 1048576.0);
399 printf("%8.2fM WIRED TOTAL\n",
400 count_wired
* 4096.0 / 1048576.0);
403 printf("%8.2fM anonymous (total, includes in-pmap)\n",
404 count_anon
* 4096.0 / 1048576.0);
405 printf("%8.2fM anonymous memory in-pmap\n",
406 count_anon_in_pmap
* 4096.0 / 1048576.0);
407 printf("%8.2fM vnode (includes wired)\n",
408 count_vnode
* 4096.0 / 1048576.0);
409 printf("%8.2fM device\n", count_device
* 4096.0 / 1048576.0);
410 printf("%8.2fM phys\n", count_phys
* 4096.0 / 1048576.0);
411 printf("%8.2fM kernel (includes wired)\n",
412 count_kernel
* 4096.0 / 1048576.0);
413 printf("%8.2fM unknown\n", count_unknown
* 4096.0 / 1048576.0);
414 printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
415 count_noobj_offqueue
* 4096.0 / 1048576.0);
416 printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
417 count_noobj_onqueue
* 4096.0 / 1048576.0);
421 * Scan the vm_page_buckets array validating all pages found
423 for (i
= 0; i
<= vm_page_hash_mask
; ++i
) {
425 printf("index %d\r", i
);
/* read the bucket head, then the page it points to, then its object */
428 kkread(kd
, (u_long
)&vm_page_buckets
[i
], &mptr
, sizeof(mptr
));
430 kkread(kd
, (u_long
)mptr
, &m
, sizeof(m
));
432 kkread(kd
, (u_long
)m
.object
, &obj
, sizeof(obj
));
/* recompute the bucket the page should hash to (same formula as
 * checkpage()) and compare against the bucket it was found in */
433 hv
= ((uintptr_t)m
.object
+ m
.pindex
) ^ obj
.hash_rand
;
434 hv
&= vm_page_hash_mask
;
436 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
437 " should be in bucket %d\n", i
, mptr
, hv
);
438 checkpage(kd
, mptr
, &m
, &obj
);
/* a hashed page with no object is itself an inconsistency */
440 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
441 " has no object\n", i
, mptr
);
456 * A page with an object.
/*
 * checkpage() - verify that page 'm' (whose kernel address is 'mptr')
 * appears on the vm_page_buckets[] hash chain it should hash to.
 * The hash formula must match the kernel's: (object + pindex) xor the
 * object's hash_rand, masked by vm_page_hash_mask.
 */
459 checkpage(kvm_t
*kd
, vm_page_t mptr
, vm_page_t m
, struct vm_object
*obj
)
/* compute the expected bucket index for this (object, pindex) pair */
466 hv
= ((uintptr_t)m
->object
+ m
->pindex
) ^ obj
->hash_rand
;
467 hv
&= vm_page_hash_mask
;
/* read the bucket's head pointer from kernel memory */
468 kkread(kd
, (u_long
)&vm_page_buckets
[hv
], &scanptr
, sizeof(scanptr
));
/* walk the chain via each page's hnext link, looking for mptr */
472 kkread(kd
, (u_long
)scanptr
, &scan
, sizeof(scan
));
473 scanptr
= scan
.hnext
;
/* found on the chain: success (debug output) */
477 printf("good checkpage %p bucket %d\n", mptr
, hv
);
/* not found: the bucket list disagrees with the page's hash */
479 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
480 " page not found in bucket list\n", hv
, mptr
);
486 * Accelerate the reading of VM pages
/*
 * kkread_vmpage() - cached read of one struct vm_page at kernel
 * address 'addr' into *m.  Reads 1024 pages at a time into a static
 * window (vpcache spanning [vpbeg, vpend)) so the main scan does not
 * issue one kvm_read() per page.
 */
489 kkread_vmpage(kvm_t
*kd
, u_long addr
, vm_page_t m
)
491 static struct vm_page vpcache
[1024];
/* refill the window when addr falls outside the cached range */
495 if (addr
< vpbeg
|| addr
>= vpend
) {
497 vpend
= addr
+ 1024 * sizeof(*m
);
/* clamp the window to the end of vm_page_array */
498 if (vpend
> (u_long
)(uintptr_t)vm_page_array
+
499 vm_page_array_size
* sizeof(*m
)) {
500 vpend
= (u_long
)(uintptr_t)vm_page_array
+
501 vm_page_array_size
* sizeof(*m
);
/* one bulk read fills the whole window */
503 kkread(kd
, vpbeg
, vpcache
, vpend
- vpbeg
);
/* hand back the requested entry out of the cached window */
505 *m
= vpcache
[(addr
- vpbeg
) / sizeof(*m
)];
/*
 * kkread() - read nbytes from kernel address 'addr' into 'buf'.
 * A short or failed kvm_read() enters the error branch (its body is
 * elided from this extract; presumably fatal — TODO confirm).
 */
509 kkread(kvm_t
*kd
, u_long addr
, void *buf
, size_t nbytes
)
/* kvm_read returns the byte count; anything else is an error */
511 if (kvm_read(kd
, addr
, buf
, nbytes
) != nbytes
) {
/*
 * kkread_err() - like kkread() but reports failure to the caller
 * instead of treating it as fatal; dumpsltrack() uses a nonzero
 * return to skip SLZone addresses that are not mapped.
 */
518 kkread_err(kvm_t
*kd
, u_long addr
, void *buf
, size_t nbytes
)
/* short read => failure path (return value lines elided here) */
520 if (kvm_read(kd
, addr
, buf
, nbytes
) != nbytes
) {
/*
 * Slab-zone tracking: a small chained hash of 128KB-aligned zone
 * addresses seen during the page scan.
 * NOTE(review): the opening of struct SLTrack (its brace and the
 * 'addr' member used by addsltrack) is elided from this extract;
 * only the 'next' chain link is visible below.
 */
527 struct SLTrack
*next
;
532 #define SLHMASK (SLHSIZE - 1)
/* hash-bucket heads for the SLTrack chains */
534 struct SLTrack
*SLHash
[SLHSIZE
];
/*
 * addsltrack() - record the 128KB-aligned SLZone address containing
 * this slab page, deduplicated through the SLHash chains.
 */
539 addsltrack(vm_page_t m
)
/* SLZones are 128KB (131072-byte) aligned; mask the address down.
 * NOTE(review): pindex * PAGE_SIZE — presumably a physical/linear
 * address for the zone; confirm against the elided context */
542 u_long addr
= (m
->pindex
* PAGE_SIZE
) & ~131071L;
/* skip pages that are not wired and mapped (condition continues on
 * an elided line) */
545 if (m
->wire_count
== 0 || (m
->flags
& PG_MAPPED
) == 0 ||
/* hash by zone number and bail out if already tracked */
549 i
= (addr
/ 131072) & SLHMASK
;
550 for (slt
= SLHash
[i
]; slt
; slt
= slt
->next
) {
551 if (slt
->addr
== addr
)
/* first sighting: allocate an entry and push it onto the chain.
 * NOTE(review): malloc() result is not checked in the visible code */
555 slt
= malloc(sizeof(*slt
));
557 slt
->next
= SLHash
[i
];
/*
 * dumpsltrack() - walk every SLHash chain, read each recorded SLZone
 * from the kernel, print its state, and report a full/total summary.
 */
565 dumpsltrack(kvm_t
*kd
)
569 long total_zones
= 0;
/* iterate all hash buckets and their chains */
572 for (i
= 0; i
< SLHSIZE
; ++i
) {
573 for (slt
= SLHash
[i
]; slt
; slt
= slt
->next
) {
/* non-fatal read: a failure means the zone address is unmapped */
576 if (kkread_err(kd
, slt
->addr
, &z
, sizeof(z
))) {
577 printf("SLZone 0x%016lx not mapped\n",
/* per-zone detail line (format continues on elided lines) */
581 printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
594 printf("FullZones/TotalZones: %ld/%ld\n", full_zones
, total_zones
);
/*
 * Duplicate-object detection: a large chained hash of vm_object
 * pointers so each object's resident_page_count is tallied only once.
 * NOTE(review): struct dup_entry's leading members (including the
 * stored pointer compared by unique_object) are elided here.
 */
597 #define HASH_SIZE (1024*1024)
598 #define HASH_MASK (HASH_SIZE - 1)
601 struct dup_entry
*next
;
/* hash-bucket heads for the dup_entry chains */
605 struct dup_entry
*dup_hash
[HASH_SIZE
];
/*
 * unique_object() - dedup check for vm_object pointers; callers use a
 * true return to add an object's resident_page_count exactly once.
 * (The tail of this function runs past the end of this extract.)
 */
608 unique_object(void *ptr
)
610 struct dup_entry
*hen
;
/* fold the pointer into a hash index; the HASH_MASK application is
 * on an elided line — presumably hv &= HASH_MASK, TODO confirm */
613 hv
= (intptr_t)ptr
^ ((intptr_t)ptr
>> 20);
/* scan the chain for a previous occurrence of ptr */
615 for (hen
= dup_hash
[hv
]; hen
; hen
= hen
->next
) {
/* first sighting: insert a new entry at the head of the chain.
 * NOTE(review): malloc() result is not checked in the visible code */
619 hen
= malloc(sizeof(*hen
));
620 hen
->next
= dup_hash
[hv
];