/*
 * VMPAGEINFO.C
 *
 * cc -I/usr/src/sys vmpageinfo.c -o /usr/local/bin/vmpageinfo -lkvm
 *
 * vmpageinfo
 *
 * Validate the vm_page_buckets[] hash array against the vm_page_array
 *
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/test/debug/vmpageinfo.c,v 1.2 2006/05/23 01:00:05 dillon Exp $
 */

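/*
 * Example invocations (file paths are illustrative only):
 *
 *      vmpageinfo -v
 *      vmpageinfo -M /var/crash/vmcore.0 -N /var/crash/kern.0 -v
 *
 * When -M/-N are omitted, kvm_open() below is handed NULL for both the
 * core and system files and the tool examines the running kernel.
 */
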
#define _KERNEL_STRUCTURES_
#include <sys/param.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/namecache.h>
#include <sys/slaballoc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>
#include <vm/vnode_pager.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <kvm.h>
#include <nlist.h>
#include <getopt.h>

struct nlist Nl[] = {
#if 0
        { "_vm_page_buckets" },
        { "_vm_page_hash_mask" },
#endif
        { "_vm_page_array" },
        { "_vm_page_array_size" },
        { NULL }
};

int debugopt;
int verboseopt;
#if 0
struct vm_page **vm_page_buckets;
int vm_page_hash_mask;
#endif
struct vm_page *vm_page_array;
int vm_page_array_size;

void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);

#if 0
static void addsltrack(vm_page_t m);
#endif
static void dumpsltrack(kvm_t *kd);

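/*
 * main() opens the kernel (or a crash dump selected with -M/-N) through
 * libkvm, resolves the vm_page_array and vm_page_array_size symbols with
 * kvm_nlist(), and then walks every struct vm_page, cross-checking pages
 * that have an associated object and printing per-page state with -v.
 */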
int
main(int ac, char **av)
{
        const char *corefile = NULL;
        const char *sysfile = NULL;
        struct vm_page m;
        struct vm_object obj;
        kvm_t *kd;
        int ch;
#if 0
        vm_page_t mptr;
        int hv;
#endif
        int i;
        const char *qstr;
        const char *ostr;

        while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
                switch(ch) {
                case 'd':
                        ++debugopt;
                        break;
                case 'v':
                        ++verboseopt;
                        break;
                case 'M':
                        corefile = optarg;
                        break;
                case 'N':
                        sysfile = optarg;
                        break;
                default:
                        fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
                        exit(1);
                }
        }
        ac -= optind;
        av += optind;

        if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
                perror("kvm_open");
                exit(1);
        }
        if (kvm_nlist(kd, Nl) != 0) {
                perror("kvm_nlist");
                exit(1);
        }

#if 0
        kkread(kd, Nl[0].n_value, &vm_page_buckets, sizeof(vm_page_buckets));
        kkread(kd, Nl[1].n_value, &vm_page_hash_mask, sizeof(vm_page_hash_mask));
#endif
        kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
        kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));

        /*
         * Scan the vm_page_array validating all pages with associated objects
         */
        for (i = 0; i < vm_page_array_size; ++i) {
                if (debugopt) {
                        printf("page %d\r", i);
                        fflush(stdout);
                }
                kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
                if (m.object) {
                        kkread(kd, (u_long)m.object, &obj, sizeof(obj));
                        checkpage(kd, &vm_page_array[i], &m, &obj);
                }
                if (verboseopt) {
                        if (m.queue >= PQ_HOLD) {
                                qstr = "HOLD";
                        } else if (m.queue >= PQ_CACHE) {
                                qstr = "CACHE";
                        } else if (m.queue >= PQ_ACTIVE) {
                                qstr = "ACTIVE";
                        } else if (m.queue >= PQ_INACTIVE) {
                                qstr = "INACTIVE";
                        } else if (m.queue >= PQ_FREE) {
                                qstr = "FREE";
                        } else {
                                qstr = "NONE";
                        }
                        printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
                               "wire=%-2d act=%-3d busy=%d %8s",
                               &vm_page_array[i],
                               m.object,
                               (intmax_t)m.pindex,
                               (intmax_t)m.pindex * PAGE_SIZE,
                               m.valid,
                               m.dirty,
                               m.hold_count,
                               m.wire_count,
                               m.act_count,
                               m.busy,
                               qstr
                        );
                        if (m.object) {
                                switch(obj.type) {
                                case OBJT_DEFAULT:
                                        ostr = "default";
                                        break;
                                case OBJT_SWAP:
                                        ostr = "swap";
                                        break;
                                case OBJT_VNODE:
                                        ostr = "vnode";
                                        break;
                                case OBJT_DEVICE:
                                        ostr = "device";
                                        break;
                                case OBJT_PHYS:
                                        ostr = "phys";
                                        break;
                                case OBJT_DEAD:
                                        ostr = "dead";
                                        break;
                                default:
                                        ostr = "unknown";
                                        break;
                                }
                        } else {
                                ostr = "-";
                        }
                        printf(" %-7s", ostr);
                        if (m.flags & PG_BUSY)
                                printf(" BUSY");
                        if (m.flags & PG_WANTED)
                                printf(" WANTED");
                        if (m.flags & PG_WINATCFLS)
                                printf(" WINATCFLS");
                        if (m.flags & PG_FICTITIOUS)
                                printf(" FICTITIOUS");
                        if (m.flags & PG_WRITEABLE)
                                printf(" WRITEABLE");
                        if (m.flags & PG_MAPPED)
                                printf(" MAPPED");
                        if (m.flags & PG_NEED_COMMIT)
                                printf(" NEED_COMMIT");
                        if (m.flags & PG_ZERO)
                                printf(" ZERO");
                        if (m.flags & PG_REFERENCED)
                                printf(" REFERENCED");
                        if (m.flags & PG_CLEANCHK)
                                printf(" CLEANCHK");
                        if (m.flags & PG_SWAPINPROG)
                                printf(" SWAPINPROG");
                        if (m.flags & PG_NOSYNC)
                                printf(" NOSYNC");
                        if (m.flags & PG_UNMANAGED)
                                printf(" UNMANAGED");
                        if (m.flags & PG_MARKER)
                                printf(" MARKER");
                        if (m.flags & PG_RAM)
                                printf(" RAM");
                        if (m.flags & PG_SWAPPED)
                                printf(" SWAPPED");
#if 0
                        if (m.flags & PG_SLAB)
                                printf(" SLAB");
#endif
                        printf("\n");
#if 0
                        if (m.flags & PG_SLAB)
                                addsltrack(&m);
#endif
                }
        }
        if (debugopt || verboseopt)
                printf("\n");

#if 0
        /*
         * Scan the vm_page_buckets array validating all pages found
         */
        for (i = 0; i <= vm_page_hash_mask; ++i) {
                if (debugopt) {
                        printf("index %d\r", i);
                        fflush(stdout);
                }
                kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
                while (mptr) {
                        kkread(kd, (u_long)mptr, &m, sizeof(m));
                        if (m.object) {
                                kkread(kd, (u_long)m.object, &obj, sizeof(obj));
                                hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
                                hv &= vm_page_hash_mask;
                                if (i != hv)
                                        printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                                               " should be in bucket %d\n", i, mptr, hv);
                                checkpage(kd, mptr, &m, &obj);
                        } else {
                                printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                                       " has no object\n", i, mptr);
                        }
                        mptr = m.hnext;
                }
        }
#endif
        if (debugopt)
                printf("\n");
        dumpsltrack(kd);
        return(0);
}

/*
 * A page with an object.  The hash bucket cross-check below is compiled
 * out in this revision, so the function currently performs no checks.
 */
void
checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
{
#if 0
        struct vm_page scan;
        vm_page_t scanptr;
        int hv;

        hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
        hv &= vm_page_hash_mask;
        kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
        while (scanptr) {
                if (scanptr == mptr)
                        break;
                kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
                scanptr = scan.hnext;
        }
        if (scanptr) {
                if (debugopt > 1)
                        printf("good checkpage %p bucket %d\n", mptr, hv);
        } else {
                printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                       " page not found in bucket list\n", hv, mptr);
        }
#endif
}

/*
 * Accelerate the reading of VM pages by caching up to 1024 of them at a
 * time and satisfying subsequent reads from the cache.
 */
static void
kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
{
        static struct vm_page vpcache[1024];
        static u_long vpbeg;
        static u_long vpend;

        if (addr < vpbeg || addr >= vpend) {
                vpbeg = addr;
                vpend = addr + 1024 * sizeof(*m);
                if (vpend > (u_long)(uintptr_t)vm_page_array +
                            vm_page_array_size * sizeof(*m)) {
                        vpend = (u_long)(uintptr_t)vm_page_array +
                                vm_page_array_size * sizeof(*m);
                }
                kkread(kd, vpbeg, vpcache, vpend - vpbeg);
        }
        *m = vpcache[(addr - vpbeg) / sizeof(*m)];
}

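/*
 * kkread() treats a short kvm_read() as fatal and exits; kkread_err()
 * instead reports the failure to the caller, which dumpsltrack() uses to
 * skip SLZone addresses that are not mapped.
 */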
static void
kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
{
        if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
                perror("kvm_read");
                exit(1);
        }
}

static int
kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
{
        if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
                return 1;
        }
        return 0;
}

struct SLTrack {
        struct SLTrack *next;
        u_long addr;
};

#define SLHSIZE 1024
#define SLHMASK (SLHSIZE - 1)

struct SLTrack *SLHash[SLHSIZE];

#if 0
static
void
addsltrack(vm_page_t m)
{
        struct SLTrack *slt;
        u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
        int i;

        if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
            m->object == NULL)
                return;

        i = (addr / 131072) & SLHMASK;
        for (slt = SLHash[i]; slt; slt = slt->next) {
                if (slt->addr == addr)
                        break;
        }
        if (slt == NULL) {
                slt = malloc(sizeof(*slt));
                slt->addr = addr;
                slt->next = SLHash[i];
                SLHash[i] = slt;
        }
}
#endif

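/*
 * Report on the SLZone addresses collected by addsltrack().  With
 * addsltrack() and its call site compiled out above, SLHash[] remains
 * empty and only the final FullZones/TotalZones summary line is printed.
 */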
static
void
dumpsltrack(kvm_t *kd)
{
        struct SLTrack *slt;
        int i;
        long total_zones = 0;
        long full_zones = 0;

        for (i = 0; i < SLHSIZE; ++i) {
                for (slt = SLHash[i]; slt; slt = slt->next) {
                        SLZone z;

                        if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
                                printf("SLZone 0x%016lx not mapped\n",
                                       slt->addr);
                                continue;
                        }
                        printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
                               "chunksz=%-5d }\n",
                               slt->addr,
                               z.z_Magic,
                               z.z_Cpu,
                               z.z_NFree,
                               z.z_ChunkSize
                        );
                        ++total_zones;
                        if (z.z_NFree == 0)
                                ++full_zones;
                }
        }
        printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
}