/*
 * cc -I/usr/src/sys vnodeinfo.c -o /usr/local/bin/vnodeinfo -lkvm
 *
 * Dump the mountlist and related vnodes.
 *
 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/test/debug/vnodeinfo.c,v 1.13 2007/05/06 20:45:01 dillon Exp $
 */

#define _KERNEL_STRUCTURES
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/namecache.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>
#include <vm/vnode_pager.h>

#include <vfs/ufs/quota.h>
#include <vfs/ufs/inode.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <kvm.h>
#include <nlist.h>

/*
 * Kernel symbols resolved via kvm_nlist(): the mount list head and the
 * vnode free list head.
 */
static struct nlist Nl[] = {
	{ "_mountlist" },
	{ "_vnode_free_list" },
	{ NULL }
};

static int withnames;	/* when set, print namecache filenames for vnodes */

static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static struct mount *dumpmount(kvm_t *kd, struct mount *mp);
static struct vnode *dumpvp(kvm_t *kd, struct vnode *vp, int whichlist);
static void dumpbufs(kvm_t *kd, void *bufp, const char *id);
static void dumplocks(kvm_t *kd, struct lockf *lockf);
static void dumplockinfo(kvm_t *kd, struct lockf_range *item);
static int getobjpages(kvm_t *kd, struct vm_object *obj);
static int getobjvnpsize(kvm_t *kd, struct vm_object *obj);
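
/*
 * Parse arguments, open the live kernel or a crash dump with libkvm,
 * resolve the needed symbols, then dump the mount list followed by the
 * vnode free list.
 */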
int
main(int ac, char **av)
{
	struct mount *mp;
	struct vnode *vp;
	kvm_t *kd;
	int ch;
	const char *corefile = NULL;
	const char *sysfile = NULL;

	while ((ch = getopt(ac, av, "alnbM:N:")) != -1) {
		switch(ch) {
		case 'n':
			/* assumed mapping: -n enables namecache name lookups */
			withnames = 1;
			break;
		case 'a':
		case 'l':
		case 'b':
			/* extra output selectors in the full tool; no-ops in this sketch */
			break;
		case 'M':
			corefile = optarg;
			break;
		case 'N':
			sysfile = optarg;
			break;
		default:
			fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
			exit(1);
		}
	}
	if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
		perror("kvm_open");
		exit(1);
	}
	if (kvm_nlist(kd, Nl) != 0) {
		perror("kvm_nlist");
		exit(1);
	}

	/* dump every mount point on the mountlist, then the vnode free list */
	kkread(kd, Nl[0].n_value, &mp, sizeof(mp));
	while (mp)
		mp = dumpmount(kd, mp);

	kkread(kd, Nl[1].n_value, &vp, sizeof(vp));
	printf("VNODEFREELIST {\n");
	while (vp)
		vp = dumpvp(kd, vp, 0);
	printf("}\n");
	return(0);
}
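
/*
 * Dump one struct mount: its lock state, flags, and fsid, then every vnode
 * on its per-mount vnode list.  Returns the next mount on the mountlist so
 * the caller can iterate.
 */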
static struct mount *
dumpmount(kvm_t *kd, struct mount *mp)
{
	struct mount mnt;
	struct vnode *vp;

	kkread(kd, (u_long)mp, &mnt, sizeof(mnt));
	printf("MOUNTPOINT %s on %s {\n",
	    mnt.mnt_stat.f_mntfromname, mnt.mnt_stat.f_mntonname);
	printf(" lk_flags %08x share %d wait %d excl %d holder = %p\n",
	    mnt.mnt_lock.lk_flags, mnt.mnt_lock.lk_sharecount,
	    mnt.mnt_lock.lk_waitcount, mnt.mnt_lock.lk_exclusivecount,
	    mnt.mnt_lock.lk_lockholder);
	printf(" mnt_flag %08x mnt_kern_flag %08x\n",
	    mnt.mnt_flag, mnt.mnt_kern_flag);
	printf(" mnt_nvnodelistsize %d\n", mnt.mnt_nvnodelistsize);
	printf(" mnt_stat.f_fsid %08x %08x\n", mnt.mnt_stat.f_fsid.val[0],
	    mnt.mnt_stat.f_fsid.val[1]);

	vp = mnt.mnt_nvnodelist.tqh_first;
	while (vp)
		vp = dumpvp(kd, vp, 1);
	printf("}\n");

	return(mnt.mnt_list.tqe_next);
}
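
/*
 * Return a printable name for a vnode type.
 */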
static const char *
vtype(enum vtype type)
{
	static char buf[32];

	switch(type) {
	case VNON:	return("VNON");
	case VREG:	return("VREG");
	case VDIR:	return("VDIR");
	case VBLK:	return("VBLK");
	case VCHR:	return("VCHR");
	case VLNK:	return("VLNK");
	case VSOCK:	return("VSOCK");
	case VFIFO:	return("VFIFO");
	case VBAD:	return("VBAD");
	default:
		/* anything else is printed numerically */
		snprintf(buf, sizeof(buf), "%d", (int)type);
		return(buf);
	}
}
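
/*
 * Dump one vnode: reference counts, flags, lock state, optionally its
 * namecache name, its clean and dirty buffer trees, and any UFS byte-range
 * locks.  Returns the next vnode on the mount list (whichlist != 0) or on
 * the free list (whichlist == 0).
 */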
static struct vnode *
dumpvp(kvm_t *kd, struct vnode *vp, int whichlist)
{
	struct vnode vn;

	kkread(kd, (u_long)vp, &vn, sizeof(vn));

	printf(" vnode %p usecnt %08x holdcnt %d type=%s flags %08x",
	    vp, vn.v_sysref.refcnt, vn.v_auxrefs, vtype(vn.v_type), vn.v_flag);

	if ((vn.v_flag & VOBJBUF) && vn.v_object) {
		int npages = getobjpages(kd, vn.v_object);
		int vnpsize = getobjvnpsize(kd, vn.v_object);
		if (npages || vnpsize)
			printf(" vmobjpgs=%d vnpsize=%d", npages, vnpsize);
	}

	if (vn.v_flag & VROOT)
		printf(" VROOT");
	if (vn.v_flag & VTEXT)
		printf(" VTEXT");
	if (vn.v_flag & VSYSTEM)
		printf(" VSYSTEM");
	if (vn.v_flag & VISTTY)
		printf(" VISTTY");
	if (vn.v_flag & VXLOCK)
		printf(" VXLOCK");
	if (vn.v_flag & VXWANT)
		printf(" VXWANT");
	if (vn.v_flag & VRECLAIMED)
		printf(" VRECLAIMED");
	if (vn.v_flag & VINACTIVE)
		printf(" VINACTIVE");
	if (vn.v_flag & VOBJBUF)
		printf(" VOBJBUF");
	if (vn.v_flag & VAGE)
		printf(" VAGE");
	if (vn.v_flag & VOLOCK)
		printf(" VOLOCK");
	if (vn.v_flag & VOWANT)
		printf(" VOWANT");
	if (vn.v_flag & VDOOMED)
		printf(" VDOOMED");
	if (vn.v_flag & VFREE)
		printf(" VFREE");
	if (vn.v_flag & VCACHED)
		printf(" VCACHED");
	if (vn.v_flag & VINFREE)
		printf(" VINFREE");
	if (vn.v_flag & VONWORKLST)
		printf(" VONWORKLST");
	if (vn.v_flag & VMOUNT)
		printf(" VMOUNT");
	if (vn.v_flag & VOBJDIRTY)
		printf(" VOBJDIRTY");
	if (vn.v_flag & VMAYHAVELOCKS)
		printf(" VMAYHAVELOCKS");
	printf("\n");

	if (vn.v_lock.lk_sharecount || vn.v_lock.lk_waitcount ||
	    vn.v_lock.lk_exclusivecount || vn.v_lock.lk_lockholder != LK_NOTHREAD) {
		printf("\tlk_flags %08x share %d wait %d excl %d holder = %p\n",
		    vn.v_lock.lk_flags, vn.v_lock.lk_sharecount,
		    vn.v_lock.lk_waitcount, vn.v_lock.lk_exclusivecount,
		    vn.v_lock.lk_lockholder);
	}

	if (withnames && TAILQ_FIRST(&vn.v_namecache)) {
		struct namecache ncp;
		char buf[1024];		/* name buffer; size chosen here */
		int nlen;

		kkread(kd, (u_long)TAILQ_FIRST(&vn.v_namecache), &ncp, sizeof(ncp));
		if ((nlen = ncp.nc_nlen) >= sizeof(buf))
			nlen = sizeof(buf) - 1;
		kkread(kd, (u_long)ncp.nc_name, buf, nlen);
		buf[nlen] = 0;
		printf("\tfilename %s\n", buf);
	}

	if (vn.v_rbclean_tree.rbh_root) {
		printf("\tCLEAN BUFFERS\n");
		dumpbufs(kd, vn.v_rbclean_tree.rbh_root, "ROOT");
	}
	if (vn.v_rbdirty_tree.rbh_root) {
		printf("\tDIRTY BUFFERS\n");
		dumpbufs(kd, vn.v_rbdirty_tree.rbh_root, "ROOT");
	}

	if (vn.v_tag == VT_UFS && vn.v_data) {
		struct inode *ip = vn.v_data;
		struct lockf lockf;

		kkread(kd, (u_long)&ip->i_lockf, &lockf, sizeof(lockf));
		dumplocks(kd, &lockf);
	}

	if (whichlist)
		return(vn.v_nmntvnodes.tqe_next);
	else
		return(vn.v_freelist.tqe_next);
}
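
/*
 * Recursively dump a red-black tree of buffers, printing each buffer's
 * bio offsets, queue index, lock counts, flags and dependencies.
 */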
static void
dumpbufs(kvm_t *kd, void *bufp, const char *id)
{
	struct buf buf;

	kkread(kd, (u_long)bufp, &buf, sizeof(buf));
	printf("\t %-8s %p loffset %012llx foffset %08llx",
	    id, bufp,
	    buf.b_bio1.bio_offset,
	    buf.b_bio2.bio_offset);
	printf(" q=%d lck=%d/%d flags=%08x dep=%p",
	    buf.b_qindex, buf.b_lock.lk_sharecount,
	    buf.b_lock.lk_exclusivecount,
	    buf.b_flags, buf.b_dep.lh_first);
	printf("\n");
	if (buf.b_rbnode.rbe_left)
		dumpbufs(kd, buf.b_rbnode.rbe_left, "LEFT");
	if (buf.b_rbnode.rbe_right)
		dumpbufs(kd, buf.b_rbnode.rbe_right, "RIGHT");
}
static void
dumplocks(kvm_t *kd, struct lockf *lockf)
{
	struct lockf_range item;
	struct lockf_range *scan;

	if ((scan = TAILQ_FIRST(&lockf->lf_range)) != NULL) {
		do {
			kkread(kd, (u_long)scan, &item, sizeof(item));
			dumplockinfo(kd, &item);
		} while ((scan = TAILQ_NEXT(&item, lf_link)) != NULL);
	}
	if ((scan = TAILQ_FIRST(&lockf->lf_blocked)) != NULL) {
		do {
			kkread(kd, (u_long)scan, &item, sizeof(item));
			dumplockinfo(kd, &item);
		} while ((scan = TAILQ_NEXT(&item, lf_link)) != NULL);
	}
}
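
/*
 * Print one lock range.  For POSIX locks the owning process's pid is read
 * from the kernel's struct proc.
 */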
static void
dumplockinfo(kvm_t *kd, struct lockf_range *item)
{
	int ownerpid;

	if (item->lf_owner && (item->lf_flags & F_POSIX)) {
		kkread(kd, (u_long)&item->lf_owner->p_pid,
		    &ownerpid, sizeof(ownerpid));
	} else {
		ownerpid = 0;
	}

	printf("\t ty=%d flgs=%04x %lld-%lld owner=%d\n",
	    item->lf_type, item->lf_flags,
	    item->lf_start, item->lf_end,
	    ownerpid);
}
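
/*
 * Return the number of resident pages in a VM object.
 */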
static int
getobjpages(kvm_t *kd, struct vm_object *obj)
{
	struct vm_object vmobj;

	kkread(kd, (u_long)obj, &vmobj, sizeof(vmobj));
	return(vmobj.resident_page_count);
}
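
/*
 * Return the size of the vnode's VM object.
 */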
static int
getobjvnpsize(kvm_t *kd, struct vm_object *obj)
{
	struct vm_object vmobj;

	kkread(kd, (u_long)obj, &vmobj, sizeof(vmobj));
	return ((int)vmobj.size);
}
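
/*
 * Copy nbytes of kernel memory at addr into buf, exiting on any failure.
 */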
static void
kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
{
	if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
		perror("kvm_read");
		exit(1);
	}
}