/*
 * VMPAGEINFO.C
 *
 * cc -I/usr/src/sys vmpageinfo.c -o ~/bin/vmpageinfo -lkvm
 *
 * vmpageinfo
 *
 * Validate the vm_page_buckets[] hash array against the vm_page_array
 *
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define _KERNEL_STRUCTURES_
#include <sys/param.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/namecache.h>
#include <sys/slaballoc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>
#include <vm/vnode_pager.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <kvm.h>
#include <nlist.h>
#include <getopt.h>
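
/*
 * Kernel symbols resolved via kvm_nlist().  The entries are referenced by
 * index below (Nl[0] == _vm_page_array, Nl[1] == _vm_page_array_size,
 * Nl[2] == _kernel_object, Nl[3] == _nbuf, Nl[4..6] == the nswbuf counts,
 * Nl[7] == _kernbase, Nl[8] == __end), so the order must not change.
 */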
struct nlist Nl[] = {
	{ "_vm_page_array" },
	{ "_vm_page_array_size" },
	{ "_kernel_object" },
	{ "_nbuf" },
	{ "_nswbuf_mem" },
	{ "_nswbuf_kva" },
	{ "_nswbuf_raw" },
	{ "_kernbase" },
	{ "__end" },
	{ NULL }
};

int debugopt;
int verboseopt;
#if 0
struct vm_page **vm_page_buckets;
int vm_page_hash_mask;
#endif
struct vm_page *vm_page_array;
struct vm_object *kernel_object_ptr;
int vm_page_array_size;
long nbuf;
long nswbuf_mem;
long nswbuf_kva;
long nswbuf_raw;
long kern_size;

void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);

#if 0
static void addsltrack(vm_page_t m);
static void dumpsltrack(kvm_t *kd);
#endif
static int unique_object(void *ptr);
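
/*
 * Page accounting.  Each counter accumulates pages (4 KiB each) while
 * vm_page_array is scanned; the totals are printed as megabytes at the
 * end of main().
 */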
long count_free;
long count_wired;		/* total */
long count_wired_vnode;
long count_wired_anon;
long count_wired_in_pmap;
long count_wired_pgtable;
long count_wired_other;
long count_wired_kernel;
long count_wired_obj_other;

long count_anon;
long count_anon_in_pmap;
long count_vnode;
long count_device;
long count_phys;
long count_kernel;
long count_unknown;
long count_noobj_offqueue;
long count_noobj_onqueue;
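
/*
 * main: parse options, open the live kernel or a crash dump via kvm(3),
 * resolve the symbols above, then walk the entire vm_page_array.
 */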
int
main(int ac, char **av)
{
	const char *corefile = NULL;
	const char *sysfile = NULL;
	struct vm_page m;
	struct vm_object obj;
	kvm_t *kd;
	int ch;
#if 0
	vm_page_t mptr;
	int hv;
#endif
	int i;
	const char *qstr;
	const char *ostr;
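
	/*
	 * Options: -M <core> and -N <system> select a crash dump and the
	 * matching kernel image (both default to the running system when
	 * omitted), -d enables debug progress output, and -v prints one
	 * line per page.
	 */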
	while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
		switch(ch) {
		case 'd':
			++debugopt;
			break;
		case 'v':
			++verboseopt;
			break;
		case 'M':
			corefile = optarg;
			break;
		case 'N':
			sysfile = optarg;
			break;
		default:
			fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
			exit(1);
		}
	}
	ac -= optind;
	av += optind;

	if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
		perror("kvm_open");
		exit(1);
	}
	if (kvm_nlist(kd, Nl) != 0) {
		perror("kvm_nlist");
		exit(1);
	}
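
	/*
	 * Copy the interesting kernel globals out of the core.  Note that
	 * for kernel_object we only record the symbol's address, not its
	 * contents, since pages are compared against the pointer itself.
	 */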
	kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
	kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
	kernel_object_ptr = (void *)Nl[2].n_value;
	kkread(kd, Nl[3].n_value, &nbuf, sizeof(nbuf));
	kkread(kd, Nl[4].n_value, &nswbuf_mem, sizeof(nswbuf_mem));
	kkread(kd, Nl[5].n_value, &nswbuf_kva, sizeof(nswbuf_kva));
	kkread(kd, Nl[6].n_value, &nswbuf_raw, sizeof(nswbuf_raw));
	kern_size = Nl[8].n_value - Nl[7].n_value;	/* __end - _kernbase */

	/*
	 * Scan the vm_page_array validating all pages with associated objects
	 */
	for (i = 0; i < vm_page_array_size; ++i) {
		if (debugopt) {
			printf("page %d\r", i);
			fflush(stdout);
		}
		kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
		if (m.object) {
			kkread(kd, (u_long)m.object, &obj, sizeof(obj));
			checkpage(kd, &vm_page_array[i], &m, &obj);
		}
		if (m.queue >= PQ_HOLD) {
			qstr = "HOLD";
		} else if (m.queue >= PQ_CACHE) {
			qstr = "CACHE";
		} else if (m.queue >= PQ_ACTIVE) {
			qstr = "ACTIVE";
		} else if (m.queue >= PQ_INACTIVE) {
			qstr = "INACTIVE";
		} else if (m.queue >= PQ_FREE) {
			qstr = "FREE";
			++count_free;
		} else {
			qstr = "NONE";
		}
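
		/*
		 * Classify wired pages by who appears to own them: page
		 * table pages (no object, mapped+writeable+unqueued),
		 * kernel_object pages, vnode pages, anonymous pages, and
		 * everything else.
		 */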
		if (m.wire_count) {
			++count_wired;
			if (m.object == NULL) {
				if ((m.flags & PG_MAPPED) &&
				    (m.flags & PG_WRITEABLE) &&
				    (m.flags & PG_UNQUEUED)) {
					++count_wired_pgtable;
				} else {
					++count_wired_other;
				}
			} else if (m.object == kernel_object_ptr) {
				++count_wired_kernel;
			} else {
				switch(obj.type) {
				case OBJT_VNODE:
					++count_wired_vnode;
					break;
				case OBJT_DEFAULT:
				case OBJT_SWAP:
					if (m.md.pmap_count)
						++count_wired_in_pmap;
					else
						++count_wired_anon;
					break;
				default:
					++count_wired_obj_other;
					break;
				}
			}
		} else
#if 0
		if (m.md.pmap_count) {
			if (m.object && m.object != kernel_object_ptr) {
				switch(obj.type) {
				case OBJT_DEFAULT:
				case OBJT_SWAP:
					++count_anon_in_pmap;
					break;
				default:
					break;
				}
			}
		}
#endif
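
		/*
		 * Per-page verbose line.  Note that because the block above
		 * is compiled out, the dangling "else" from the wired-page
		 * branch makes this verbose output apply only to pages with
		 * wire_count == 0.
		 */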
		if (verboseopt) {
			printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
			       "wire=%-2d act=%-3d busy=%d w/pmapcnt=%d/%d %8s",
			       &vm_page_array[i],
			       m.object,
			       (intmax_t)m.pindex,
			       (intmax_t)m.pindex * PAGE_SIZE,
			       m.valid,
			       m.dirty,
			       m.hold_count,
			       m.wire_count,
			       m.act_count,
			       m.busy_count,
			       0 /* m.md.writeable_count */,
			       0 /* m.md.pmap_count */,
			       qstr
			);
		}
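
		/*
		 * Attribute the page to an object class.  resident_page_count
		 * is added only the first time a given vm_object is seen
		 * (unique_object), so these totals are per-object sums rather
		 * than per-page counts.
		 */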
		if (m.object == kernel_object_ptr) {
			ostr = "kernel";
			if (unique_object(m.object))
				count_kernel += obj.resident_page_count;
		} else if (m.object) {
			switch(obj.type) {
			case OBJT_DEFAULT:
				ostr = "default";
				if (unique_object(m.object))
					count_anon += obj.resident_page_count;
				break;
			case OBJT_SWAP:
				ostr = "swap";
				if (unique_object(m.object))
					count_anon += obj.resident_page_count;
				break;
			case OBJT_VNODE:
				ostr = "vnode";
				if (unique_object(m.object))
					count_vnode += obj.resident_page_count;
				break;
			case OBJT_DEVICE:
				ostr = "device";
				if (unique_object(m.object))
					count_device += obj.resident_page_count;
				break;
			case OBJT_PHYS:
				ostr = "phys";
				if (unique_object(m.object))
					count_phys += obj.resident_page_count;
				break;
			case OBJT_DEAD:
				ostr = "dead";
				if (unique_object(m.object))
					count_unknown += obj.resident_page_count;
				break;
			default:
				if (unique_object(m.object))
					count_unknown += obj.resident_page_count;
				ostr = "unknown";
				break;
			}
		} else {
			ostr = "-";
			if (m.queue == PQ_NONE)
				++count_noobj_offqueue;
			else if (m.queue - m.pc != PQ_FREE)
				++count_noobj_onqueue;
		}
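
		/*
		 * In verbose mode, append the object class and decode the
		 * page's busy state and flag bits.
		 */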
		if (verboseopt) {
			printf(" %-7s", ostr);
			if (m.busy_count & PBUSY_LOCKED)
				printf(" BUSY");
			if (m.busy_count & PBUSY_WANTED)
				printf(" WANTED");
			if (m.flags & PG_WINATCFLS)
				printf(" WINATCFLS");
			if (m.flags & PG_FICTITIOUS)
				printf(" FICTITIOUS");
			if (m.flags & PG_WRITEABLE)
				printf(" WRITEABLE");
			if (m.flags & PG_MAPPED)
				printf(" MAPPED");
			if (m.flags & PG_NEED_COMMIT)
				printf(" NEED_COMMIT");
			if (m.flags & PG_REFERENCED)
				printf(" REFERENCED");
			if (m.flags & PG_CLEANCHK)
				printf(" CLEANCHK");
			if (m.busy_count & PBUSY_SWAPINPROG)
				printf(" SWAPINPROG");
			if (m.flags & PG_NOSYNC)
				printf(" NOSYNC");
			if (m.flags & PG_UNQUEUED)
				printf(" UNQUEUED");
			if (m.flags & PG_MARKER)
				printf(" MARKER");
			if (m.flags & PG_RAM)
				printf(" RAM");
			if (m.flags & PG_SWAPPED)
				printf(" SWAPPED");
#if 0
			if (m.flags & PG_SLAB)
				printf(" SLAB");
#endif
			printf("\n");
#if 0
			if (m.flags & PG_SLAB)
				addsltrack(&m);
#endif
		}
	}
	if (debugopt || verboseopt)
		printf("\n");
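
	/*
	 * Summary.  All page counters are in 4 KiB pages and are reported
	 * in megabytes.
	 */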
	printf("%8.2fM free\n", count_free * 4096.0 / 1048576.0);

	printf("%8.2fM wired vnode (in buffer cache)\n",
		count_wired_vnode * 4096.0 / 1048576.0);
	printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
		count_wired_in_pmap * 4096.0 / 1048576.0);
	printf("%8.2fM wired pgtable\n",
		count_wired_pgtable * 4096.0 / 1048576.0);
	printf("%8.2fM wired anon\n",
		count_wired_anon * 4096.0 / 1048576.0);
	printf("%8.2fM wired kernel_object\n",
		count_wired_kernel * 4096.0 / 1048576.0);

	printf("\t%8.2fM vm_page_array\n",
		vm_page_array_size * sizeof(struct vm_page) / 1048576.0);
	printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
		(nbuf + nswbuf_mem + nswbuf_kva + nswbuf_raw) *
		sizeof(struct buf) / 1048576.0);
	printf("\t%8.2fM kernel binary\n", kern_size / 1048576.0);
	printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");

	printf("%8.2fM wired other (unknown object)\n",
		count_wired_obj_other * 4096.0 / 1048576.0);
	printf("%8.2fM wired other (no object, probably kernel)\n",
		count_wired_other * 4096.0 / 1048576.0);

	printf("%8.2fM WIRED TOTAL\n",
		count_wired * 4096.0 / 1048576.0);

	printf("\n");
	printf("%8.2fM anonymous (total, includes in-pmap)\n",
		count_anon * 4096.0 / 1048576.0);
	printf("%8.2fM anonymous memory in-pmap\n",
		count_anon_in_pmap * 4096.0 / 1048576.0);
	printf("%8.2fM vnode (includes wired)\n",
		count_vnode * 4096.0 / 1048576.0);
	printf("%8.2fM device\n", count_device * 4096.0 / 1048576.0);
	printf("%8.2fM phys\n", count_phys * 4096.0 / 1048576.0);
	printf("%8.2fM kernel (includes wired)\n",
		count_kernel * 4096.0 / 1048576.0);
	printf("%8.2fM unknown\n", count_unknown * 4096.0 / 1048576.0);
	printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
		count_noobj_offqueue * 4096.0 / 1048576.0);
	printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
		count_noobj_onqueue * 4096.0 / 1048576.0);

#if 0
	/*
	 * Scan the vm_page_buckets array validating all pages found
	 */
	for (i = 0; i <= vm_page_hash_mask; ++i) {
		if (debugopt) {
			printf("index %d\r", i);
			fflush(stdout);
		}
		kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
		while (mptr) {
			kkread(kd, (u_long)mptr, &m, sizeof(m));
			if (m.object) {
				kkread(kd, (u_long)m.object, &obj, sizeof(obj));
				hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
				hv &= vm_page_hash_mask;
				if (i != hv)
					printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
					       " should be in bucket %d\n", i, mptr, hv);
				checkpage(kd, mptr, &m, &obj);
			} else {
				printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
				       " has no object\n", i, mptr);
			}
			mptr = m.hnext;
		}
	}
#endif
	if (debugopt)
		printf("\n");
#if 0
	dumpsltrack(kd);
#endif
	return(0);
}

/*
 * Validate a page that has an associated object (hash-bucket check,
 * currently compiled out).
 */
void
checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
{
#if 0
	struct vm_page scan;
	vm_page_t scanptr;
	int hv;

	hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
	hv &= vm_page_hash_mask;
	kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
	while (scanptr) {
		if (scanptr == mptr)
			break;
		kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
		scanptr = scan.hnext;
	}
	if (scanptr) {
		if (debugopt > 1)
			printf("good checkpage %p bucket %d\n", mptr, hv);
	} else {
		printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
		       " page not found in bucket list\n", hv, mptr);
	}
#endif
}

/*
 * Accelerate the reading of VM pages: kvm_read() them in blocks of up
 * to 1024 struct vm_page's and satisfy subsequent reads from the cache.
 */
static void
kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
{
	static struct vm_page vpcache[1024];
	static u_long vpbeg;
	static u_long vpend;

	if (addr < vpbeg || addr >= vpend) {
		vpbeg = addr;
		vpend = addr + 1024 * sizeof(*m);
		if (vpend > (u_long)(uintptr_t)vm_page_array +
			    vm_page_array_size * sizeof(*m)) {
			vpend = (u_long)(uintptr_t)vm_page_array +
				vm_page_array_size * sizeof(*m);
		}
		kkread(kd, vpbeg, vpcache, vpend - vpbeg);
	}
	*m = vpcache[(addr - vpbeg) / sizeof(*m)];
}
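
/*
 * kkread() aborts the program on any short read; kkread_err() instead
 * returns nonzero so callers (e.g. dumpsltrack) can skip addresses that
 * are not mapped in the core.
 */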
static void
kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
{
	if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
		perror("kvm_read");
		exit(1);
	}
}

static int
kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
{
	if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
		return 1;
	}
	return 0;
}

struct SLTrack {
	struct SLTrack *next;
	u_long	addr;
};

#define SLHSIZE	1024
#define SLHMASK	(SLHSIZE - 1)

struct SLTrack *SLHash[SLHSIZE];
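
/*
 * Slab-zone tracking, presumably a leftover debugging aid: addsltrack()
 * (compiled out, as is its only caller) remembers each 128KB zone address
 * seen, and dumpsltrack() reads the SLZone headers back out of the core.
 */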

#if 0
static
void
addsltrack(vm_page_t m)
{
	struct SLTrack *slt;
	u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
	int i;

	if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
	    m->object == NULL)
		return;

	i = (addr / 131072) & SLHMASK;
	for (slt = SLHash[i]; slt; slt = slt->next) {
		if (slt->addr == addr)
			break;
	}
	if (slt == NULL) {
		slt = malloc(sizeof(*slt));
		slt->addr = addr;
		slt->next = SLHash[i];
		SLHash[i] = slt;
	}
}
#endif

static
void
dumpsltrack(kvm_t *kd)
{
	struct SLTrack *slt;
	int i;
	long total_zones = 0;
	long full_zones = 0;

	for (i = 0; i < SLHSIZE; ++i) {
		for (slt = SLHash[i]; slt; slt = slt->next) {
			SLZone z;

			if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
				printf("SLZone 0x%016lx not mapped\n",
				       slt->addr);
				continue;
			}
			printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
			       "chunksz=%-5d }\n",
			       slt->addr,
			       z.z_Magic,
			       z.z_Cpu,
			       z.z_NFree,
			       z.z_ChunkSize
			);
			++total_zones;
			if (z.z_NFree == 0)
				++full_zones;
		}
	}
	printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
}
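
/*
 * unique_object() remembers every vm_object pointer it has been handed in
 * a simple chained hash table and returns nonzero only the first time a
 * given object is seen, so per-object resident counts are added once.
 */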
#define HASH_SIZE	(1024*1024)
#define HASH_MASK	(HASH_SIZE - 1)

struct dup_entry {
	struct dup_entry *next;
	void	*ptr;
};

struct dup_entry *dup_hash[HASH_SIZE];

static int
unique_object(void *ptr)
{
	struct dup_entry *hen;
	int hv;

	hv = (intptr_t)ptr ^ ((intptr_t)ptr >> 20);
	hv &= HASH_MASK;
	for (hen = dup_hash[hv]; hen; hen = hen->next) {
		if (hen->ptr == ptr)
			return 0;
	}
	hen = malloc(sizeof(*hen));
	hen->next = dup_hash[hv];
	hen->ptr = ptr;
	dup_hash[hv] = hen;

	return 1;
}