8672 proc_t changes broke genunix dmods and walker
unleashed.git: usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c
blob a801d4d39f12742e0f22bec9ad19c2ae56f82fb0
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2017 Joyent, Inc.
27 * Mdb kernel support module. This module is loaded automatically when the
28 * kvm target is initialized. Any global functions declared here are exported
29 * for the resolution of symbols in subsequently loaded modules.
31 * WARNING: Do not assume that static variables in mdb_ks will be initialized
32 * to zero.
35 #include <mdb/mdb_target.h>
36 #include <mdb/mdb_param.h>
37 #include <mdb/mdb_modapi.h>
38 #include <mdb/mdb_ks.h>
40 #include <sys/types.h>
41 #include <sys/procfs.h>
42 #include <sys/proc.h>
43 #include <sys/dnlc.h>
44 #include <sys/autoconf.h>
45 #include <sys/machelf.h>
46 #include <sys/modctl.h>
47 #include <sys/hwconf.h>
48 #include <sys/kobj.h>
49 #include <sys/fs/autofs.h>
50 #include <sys/ddi_impldefs.h>
51 #include <sys/refstr_impl.h>
52 #include <sys/cpuvar.h>
53 #include <sys/dlpi.h>
54 #include <sys/clock_impl.h>
55 #include <sys/swap.h>
56 #include <errno.h>
58 #include <vm/seg_vn.h>
59 #include <vm/page.h>
61 #define MDB_PATH_NELEM 256 /* Maximum path components */
63 typedef struct mdb_path {
64 size_t mdp_nelem; /* Number of components */
65 uint_t mdp_complete; /* Path completely resolved? */
66 uintptr_t mdp_vnode[MDB_PATH_NELEM]; /* Array of vnode_t addresses */
67 char *mdp_name[MDB_PATH_NELEM]; /* Array of name components */
68 } mdb_path_t;
70 static int mdb_autonode2path(uintptr_t, mdb_path_t *);
71 static int mdb_sprintpath(char *, size_t, mdb_path_t *);
74 * Kernel parameters from <sys/param.h> which we keep in-core:
76 unsigned long _mdb_ks_pagesize;
77 unsigned int _mdb_ks_pageshift;
78 unsigned long _mdb_ks_pageoffset;
79 unsigned long long _mdb_ks_pagemask;
80 unsigned long _mdb_ks_mmu_pagesize;
81 unsigned int _mdb_ks_mmu_pageshift;
82 unsigned long _mdb_ks_mmu_pageoffset;
83 unsigned long _mdb_ks_mmu_pagemask;
84 uintptr_t _mdb_ks_kernelbase;
85 uintptr_t _mdb_ks_userlimit;
86 uintptr_t _mdb_ks_userlimit32;
87 uintptr_t _mdb_ks_argsbase;
88 unsigned long _mdb_ks_msg_bsize;
89 unsigned long _mdb_ks_defaultstksz;
90 int _mdb_ks_ncpu;
91 int _mdb_ks_ncpu_log2;
92 int _mdb_ks_ncpu_p2;
95 * In-core copy of DNLC information:
97 #define MDB_DNLC_HSIZE 1024
98 #define MDB_DNLC_HASH(vp) (((uintptr_t)(vp) >> 3) & (MDB_DNLC_HSIZE - 1))
99 #define MDB_DNLC_NCACHE_SZ(ncp) (sizeof (ncache_t) + (ncp)->namlen)
100 #define MDB_DNLC_MAX_RETRY 4
102 static ncache_t **dnlc_hash;	/* mdb's hash array of dnlc entries */
105 * copy of page_hash-related data
107 static int page_hash_loaded;
108 static long mdb_page_hashsz;
109 static uint_t mdb_page_hashsz_shift; /* Needed for PAGE_HASH_FUNC */
110 static uintptr_t mdb_page_hash; /* base address of page hash */
111 #define page_hashsz mdb_page_hashsz
112 #define page_hashsz_shift mdb_page_hashsz_shift
115 * This will be the location of the vnodeops pointer for "autofs_vnodeops"
116 * The pointer still needs to be read with mdb_vread() to get the location
117 * of the vnodeops structure for autofs.
119 static struct vnodeops *autofs_vnops_ptr;
122 * STREAMS queue registrations:
124 typedef struct mdb_qinfo {
125 const mdb_qops_t *qi_ops; /* Address of ops vector */
126 uintptr_t qi_addr; /* Address of qinit structure (key) */
127 struct mdb_qinfo *qi_next; /* Next qinfo in list */
128 } mdb_qinfo_t;
130 static mdb_qinfo_t *qi_head; /* Head of qinfo chain */
133 * Device naming callback structure:
135 typedef struct nm_query {
136 const char *nm_name; /* Device driver name [in/out] */
137 major_t nm_major; /* Device major number [in/out] */
138 ushort_t nm_found; /* Did we find a match? [out] */
139 } nm_query_t;
142 * Address-to-modctl callback structure:
144 typedef struct a2m_query {
145 uintptr_t a2m_addr; /* Virtual address [in] */
146 uintptr_t a2m_where; /* Modctl address [out] */
147 } a2m_query_t;
150 * Segment-to-mdb_map callback structure:
152 typedef struct {
153 struct seg_ops *asm_segvn_ops; /* Address of segvn ops [in] */
154 void (*asm_callback)(const struct mdb_map *, void *); /* Callb [in] */
155 void *asm_cbdata; /* Callback data [in] */
156 } asmap_arg_t;
158 static void
159 dnlc_free(void)
161 ncache_t *ncp, *next;
162 int i;
164 if (dnlc_hash == NULL) {
165 return;
169 * Free up current dnlc entries
171 for (i = 0; i < MDB_DNLC_HSIZE; i++) {
172 for (ncp = dnlc_hash[i]; ncp; ncp = next) {
173 next = ncp->hash_next;
174 mdb_free(ncp, MDB_DNLC_NCACHE_SZ(ncp));
177 mdb_free(dnlc_hash, MDB_DNLC_HSIZE * sizeof (ncache_t *));
178 dnlc_hash = NULL;
181 char bad_dnlc[] = "inconsistent dnlc chain: %d, ncache va: %p"
182 " - continuing with the rest\n";
184 static int
185 dnlc_load(void)
187 int i; /* hash index */
188 int retry_cnt = 0;
189 int skip_bad_chains = 0;
190 int nc_hashsz; /* kernel hash array size */
191 uintptr_t nc_hash_addr; /* kernel va of ncache hash array */
192 uintptr_t head; /* kernel va of head of hash chain */
195 * If we've already cached the DNLC and we're looking at a dump,
196 * our cache is good forever, so don't bother re-loading.
198 if (dnlc_hash && mdb_prop_postmortem) {
199 return (0);
203 * For a core dump, retries won't help.
204 * Just print and skip any bad chains.
206 if (mdb_prop_postmortem) {
207 skip_bad_chains = 1;
209 retry:
210 if (retry_cnt++ >= MDB_DNLC_MAX_RETRY) {
212 * Give up retrying the rapidly changing dnlc.
213 * Just print and skip any bad chains
215 skip_bad_chains = 1;
218 dnlc_free(); /* Free up the mdb hashed dnlc - if any */
221 * Although nc_hashsz and the location of nc_hash don't currently
222 * change, they may in the future with a more dynamic dnlc.
223 * So always read these values afresh.
225 if (mdb_readvar(&nc_hashsz, "nc_hashsz") == -1) {
226 mdb_warn("failed to read nc_hashsz");
227 return (-1);
229 if (mdb_readvar(&nc_hash_addr, "nc_hash") == -1) {
230 mdb_warn("failed to read nc_hash");
231 return (-1);
235 * Allocate the mdb dnlc hash array
237 dnlc_hash = mdb_zalloc(MDB_DNLC_HSIZE * sizeof (ncache_t *), UM_SLEEP);
239 /* for each kernel hash chain */
240 for (i = 0, head = nc_hash_addr; i < nc_hashsz;
241 i++, head += sizeof (nc_hash_t)) {
242 nc_hash_t nch; /* kernel hash chain header */
243 ncache_t *ncp; /* name cache pointer */
244 int hash; /* mdb hash value */
245 uintptr_t nc_va; /* kernel va of next ncache */
246 uintptr_t ncprev_va; /* kernel va of previous ncache */
247 int khash; /* kernel dnlc hash value */
248 uchar_t namelen; /* name length */
249 ncache_t nc; /* name cache entry */
250 int nc_size; /* size of a name cache entry */
253 * We read each element of the nc_hash array individually
254 * just before we process the entries in its chain. This is
255 * because the chain can change so rapidly on a running system.
257 if (mdb_vread(&nch, sizeof (nc_hash_t), head) == -1) {
258 mdb_warn("failed to read nc_hash chain header %d", i);
259 dnlc_free();
260 return (-1);
263 ncprev_va = head;
264 nc_va = (uintptr_t)(nch.hash_next);
265 /* for each entry in the chain */
266 while (nc_va != head) {
268 * The size of the ncache entries varies
269 * because the name is appended to the structure.
270 * So we read in the structure then re-read
271 * for the structure plus name.
273 if (mdb_vread(&nc, sizeof (ncache_t), nc_va) == -1) {
274 if (skip_bad_chains) {
275 mdb_warn(bad_dnlc, i, nc_va);
276 break;
278 goto retry;
280 nc_size = MDB_DNLC_NCACHE_SZ(&nc);
281 ncp = mdb_alloc(nc_size, UM_SLEEP);
282 if (mdb_vread(ncp, nc_size - 1, nc_va) == -1) {
283 mdb_free(ncp, nc_size);
284 if (skip_bad_chains) {
285 mdb_warn(bad_dnlc, i, nc_va);
286 break;
288 goto retry;
292 * Check for chain consistency
294 if ((uintptr_t)ncp->hash_prev != ncprev_va) {
295 mdb_free(ncp, nc_size);
296 if (skip_bad_chains) {
297 mdb_warn(bad_dnlc, i, nc_va);
298 break;
300 goto retry;
303 * Terminate the new name with a null.
304 * Note, we allowed space for this null when
305 * allocating space for the entry.
307 ncp->name[ncp->namlen] = '\0';
310 * Validate new entry by re-hashing using the
311 * kernel dnlc hash function and comparing the hash
313 DNLCHASH(ncp->name, ncp->dp, khash, namelen);
314 if ((namelen != ncp->namlen) ||
315 (khash != ncp->hash)) {
316 mdb_free(ncp, nc_size);
317 if (skip_bad_chains) {
318 mdb_warn(bad_dnlc, i, nc_va);
319 break;
321 goto retry;
325 * Finally put the validated entry into the mdb
326 * hash chains. Reuse the kernel next hash field
327 * for the mdb hash chain pointer.
329 hash = MDB_DNLC_HASH(ncp->vp);
330 ncprev_va = nc_va;
331 nc_va = (uintptr_t)(ncp->hash_next);
332 ncp->hash_next = dnlc_hash[hash];
333 dnlc_hash[hash] = ncp;
336 return (0);
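/*
 * ::dnlc - dump the DNLC entries cached above by dnlc_load().  For example,
 * running "> ::dnlc" at the debugger prompt lists each cached entry as a
 * VP, DVP, NAME triple.
 */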
339 /*ARGSUSED*/
341 dnlcdump(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
343 ncache_t *ent;
344 int i;
346 if ((flags & DCMD_ADDRSPEC) || argc != 0)
347 return (DCMD_USAGE);
349 if (dnlc_load() == -1)
350 return (DCMD_ERR);
352 mdb_printf("%<u>%-?s %-?s %-32s%</u>\n", "VP", "DVP", "NAME");
354 for (i = 0; i < MDB_DNLC_HSIZE; i++) {
355 for (ent = dnlc_hash[i]; ent != NULL; ent = ent->hash_next) {
356 mdb_printf("%0?p %0?p %s\n",
357 ent->vp, ent->dp, ent->name);
361 return (DCMD_OK);
364 static int
365 mdb_sprintpath(char *buf, size_t len, mdb_path_t *path)
367 char *s = buf;
368 int i;
370 if (len < sizeof ("/..."))
371 return (-1);
373 if (!path->mdp_complete) {
374 (void) strcpy(s, "??");
375 s += 2;
377 if (path->mdp_nelem == 0)
378 return (-1);
381 if (path->mdp_nelem == 0) {
382 (void) strcpy(s, "/");
383 return (0);
386 for (i = path->mdp_nelem - 1; i >= 0; i--) {
388 * Number of bytes left is the distance from where we
389 * are to the end, minus 2 for '/' and '\0'
391 ssize_t left = (ssize_t)(&buf[len] - s) - 2;
393 if (left <= 0)
394 break;
396 *s++ = '/';
397 (void) strncpy(s, path->mdp_name[i], left);
398 s[left - 1] = '\0';
399 s += strlen(s);
401 if (left < strlen(path->mdp_name[i]))
402 break;
405 if (i >= 0)
406 (void) strcpy(&buf[len - 4], "...");
408 return (0);
411 static int
412 mdb_autonode2path(uintptr_t addr, mdb_path_t *path)
414 fninfo_t fni;
415 fnnode_t fn;
417 vnode_t vn;
418 vfs_t vfs;
419 struct vnodeops *autofs_vnops = NULL;
422 * "autofs_vnops_ptr" is the address of the pointer to the vnodeops
423 * structure for autofs. We want to read it each time we access
424 * it since autofs could (in theory) be unloaded and reloaded.
426 if (mdb_vread(&autofs_vnops, sizeof (autofs_vnops),
427 (uintptr_t)autofs_vnops_ptr) == -1)
428 return (-1);
430 if (mdb_vread(&vn, sizeof (vn), addr) == -1)
431 return (-1);
433 if (autofs_vnops == NULL || vn.v_op != autofs_vnops)
434 return (-1);
436 addr = (uintptr_t)vn.v_data;
438 if (mdb_vread(&vfs, sizeof (vfs), (uintptr_t)vn.v_vfsp) == -1 ||
439 mdb_vread(&fni, sizeof (fni), (uintptr_t)vfs.vfs_data) == -1 ||
440 mdb_vread(&vn, sizeof (vn), (uintptr_t)fni.fi_rootvp) == -1)
441 return (-1);
443 for (;;) {
444 size_t elem = path->mdp_nelem++;
445 char elemstr[MAXNAMELEN];
446 char *c, *p;
448 if (elem == MDB_PATH_NELEM) {
449 path->mdp_nelem--;
450 return (-1);
453 if (mdb_vread(&fn, sizeof (fn), addr) != sizeof (fn)) {
454 path->mdp_nelem--;
455 return (-1);
458 if (mdb_readstr(elemstr, sizeof (elemstr),
459 (uintptr_t)fn.fn_name) <= 0) {
460 (void) strcpy(elemstr, "?");
463 c = mdb_alloc(strlen(elemstr) + 1, UM_SLEEP | UM_GC);
464 (void) strcpy(c, elemstr);
466 path->mdp_vnode[elem] = (uintptr_t)fn.fn_vnode;
468 if (addr == (uintptr_t)fn.fn_parent) {
469 path->mdp_name[elem] = &c[1];
470 path->mdp_complete = TRUE;
471 break;
474 if ((p = strrchr(c, '/')) != NULL)
475 path->mdp_name[elem] = p + 1;
476 else
477 path->mdp_name[elem] = c;
479 addr = (uintptr_t)fn.fn_parent;
482 return (0);
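/*
 * Resolve a vnode address to a path name.  The cached v_path string is used
 * when available; otherwise the path is rebuilt by walking the in-core DNLC
 * copy backwards from the vnode toward rootdir, with mdb_autonode2path()
 * as a fallback for autofs nodes.  A sketch of how a dmod might call it
 * (buffer name is illustrative):
 *
 *	char path[MAXPATHLEN];
 *
 *	if (mdb_vnode2path(addr, path, sizeof (path)) == 0)
 *		mdb_printf("%0?p %s\n", addr, path);
 */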
486 mdb_vnode2path(uintptr_t addr, char *buf, size_t buflen)
488 uintptr_t rootdir;
489 ncache_t *ent;
490 vnode_t vp;
491 mdb_path_t path;
494 * Check to see if we have a cached value for this vnode
496 if (mdb_vread(&vp, sizeof (vp), addr) != -1 &&
497 vp.v_path != NULL &&
498 mdb_readstr(buf, buflen, (uintptr_t)vp.v_path) != -1)
499 return (0);
501 if (dnlc_load() == -1)
502 return (-1);
504 if (mdb_readvar(&rootdir, "rootdir") == -1) {
505 mdb_warn("failed to read 'rootdir'");
506 return (-1);
509 bzero(&path, sizeof (mdb_path_t));
510 again:
511 if ((addr == NULL) && (path.mdp_nelem == 0)) {
513 * 0 elems && complete tells sprintpath to just print "/"
515 path.mdp_complete = TRUE;
516 goto out;
519 if (addr == rootdir) {
520 path.mdp_complete = TRUE;
521 goto out;
524 for (ent = dnlc_hash[MDB_DNLC_HASH(addr)]; ent; ent = ent->hash_next) {
525 if ((uintptr_t)ent->vp == addr) {
526 if (strcmp(ent->name, "..") == 0 ||
527 strcmp(ent->name, ".") == 0)
528 continue;
530 path.mdp_vnode[path.mdp_nelem] = (uintptr_t)ent->vp;
531 path.mdp_name[path.mdp_nelem] = ent->name;
532 path.mdp_nelem++;
534 if (path.mdp_nelem == MDB_PATH_NELEM) {
535 path.mdp_nelem--;
536 mdb_warn("path exceeded maximum expected "
537 "elements\n");
538 return (-1);
541 addr = (uintptr_t)ent->dp;
542 goto again;
546 (void) mdb_autonode2path(addr, &path);
548 out:
549 return (mdb_sprintpath(buf, buflen, &path));
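/*
 * Translate a PID into the address of its proc_t by walking the kernel's
 * pidhash and procdir tables; if 'proc' is non-NULL the proc_t is also
 * copied out.  Returns 0 if the PID cannot be found.  A dependent dmod
 * might use it along these lines (names are illustrative):
 *
 *	proc_t p;
 *	uintptr_t paddr;
 *
 *	if ((paddr = mdb_pid2proc(pid, &p)) == 0)
 *		mdb_warn("PID %d not found\n", pid);
 */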
553 uintptr_t
554 mdb_pid2proc(pid_t pid, proc_t *proc)
556 int pid_hashsz, hash;
557 uintptr_t paddr, pidhash, procdir;
558 struct pid pidp;
560 if (mdb_readvar(&pidhash, "pidhash") == -1)
561 return (0);
563 if (mdb_readvar(&pid_hashsz, "pid_hashsz") == -1)
564 return (0);
566 if (mdb_readvar(&procdir, "procdir") == -1)
567 return (0);
569 hash = pid & (pid_hashsz - 1);
571 if (mdb_vread(&paddr, sizeof (paddr),
572 pidhash + (hash * sizeof (paddr))) == -1)
573 return (0);
575 while (paddr != 0) {
576 if (mdb_vread(&pidp, sizeof (pidp), paddr) == -1)
577 return (0);
579 if (pidp.pid_id == pid) {
580 uintptr_t procp;
582 if (mdb_vread(&procp, sizeof (procp), procdir +
583 (pidp.pid_prslot * sizeof (procp))) == -1)
584 return (0);
586 if (proc != NULL)
587 (void) mdb_vread(proc, sizeof (proc_t), procp);
589 return (procp);
591 paddr = (uintptr_t)pidp.pid_link;
593 return (0);
597 mdb_cpu2cpuid(uintptr_t cpup)
599 cpu_t cpu;
601 if (mdb_vread(&cpu, sizeof (cpu_t), cpup) != sizeof (cpu_t))
602 return (-1);
604 return (cpu.cpu_id);
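/*
 * Return the ID of the lowest-numbered CPU set in the NCPU-wide cpuset at
 * 'cpusetp', or -1 if the set is empty or cannot be read.
 */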
608 mdb_cpuset_find(uintptr_t cpusetp)
610 ulong_t *cpuset;
611 size_t nr_words = BT_BITOUL(NCPU);
612 size_t sz = nr_words * sizeof (ulong_t);
613 size_t i;
614 int cpu = -1;
616 cpuset = mdb_alloc(sz, UM_SLEEP);
618 if (mdb_vread((void *)cpuset, sz, cpusetp) != sz)
619 goto out;
621 for (i = 0; i < nr_words; i++) {
622 size_t j;
623 ulong_t m;
625 for (j = 0, m = 1; j < BT_NBIPUL; j++, m <<= 1) {
626 if (cpuset[i] & m) {
627 cpu = i * BT_NBIPUL + j;
628 goto out;
633 out:
634 mdb_free(cpuset, sz);
635 return (cpu);
638 static int
639 page_hash_load(void)
641 if (page_hash_loaded) {
642 return (1);
645 if (mdb_readvar(&mdb_page_hashsz, "page_hashsz") == -1) {
646 mdb_warn("unable to read page_hashsz");
647 return (0);
649 if (mdb_readvar(&mdb_page_hashsz_shift, "page_hashsz_shift") == -1) {
650 mdb_warn("unable to read page_hashsz_shift");
651 return (0);
653 if (mdb_readvar(&mdb_page_hash, "page_hash") == -1) {
654 mdb_warn("unable to read page_hash");
655 return (0);
658 page_hash_loaded = 1; /* zeroed on state change */
659 return (1);
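/*
 * Look up the page_t for the given (vnode, offset) pair by applying the
 * kernel's PAGE_HASH_FUNC() to the page_hash parameters cached above and
 * then walking the p_hash chain in the target.  Returns the page_t address,
 * or NULL if no matching page is found (or if this mdb_ks does not match
 * the target kernel's hash function).
 */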
662 uintptr_t
663 mdb_page_lookup(uintptr_t vp, u_offset_t offset)
665 size_t ndx;
666 uintptr_t page_hash_entry, pp;
668 if (!page_hash_loaded && !page_hash_load()) {
669 return (NULL);
672 ndx = PAGE_HASH_FUNC(vp, offset);
673 page_hash_entry = mdb_page_hash + ndx * sizeof (uintptr_t);
675 if (mdb_vread(&pp, sizeof (pp), page_hash_entry) < 0) {
676 mdb_warn("unable to read page_hash[%ld] (%p)", ndx,
677 page_hash_entry);
678 return (NULL);
681 while (pp != NULL) {
682 page_t page;
683 long nndx;
685 if (mdb_vread(&page, sizeof (page), pp) < 0) {
686 mdb_warn("unable to read page_t at %p", pp);
687 return (NULL);
690 if ((uintptr_t)page.p_vnode == vp &&
691 (uint64_t)page.p_offset == offset)
692 return (pp);
695 * Double check that the pages actually hash to the
696 * bucket we're searching. If not, our version of
697 * PAGE_HASH_FUNC() doesn't match the kernel's, and we're
698 * not going to be able to find the page. The most
699 * likely reason for this that mdb_ks doesn't match the
700 * kernel we're running against.
702 nndx = PAGE_HASH_FUNC(page.p_vnode, page.p_offset);
703 if (page.p_vnode != NULL && nndx != ndx) {
704 mdb_warn("mdb_page_lookup: mdb_ks PAGE_HASH_FUNC() "
705 "mismatch: in bucket %ld, but page %p hashes to "
706 "bucket %ld\n", ndx, pp, nndx);
707 return (NULL);
710 pp = (uintptr_t)page.p_hash;
713 return (NULL);
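/*
 * Map a vnode type to the single ls(1) -F style decoration character used
 * by file-printing dcmds: '/' for directories, '@' for symlinks, '|' for
 * FIFOs, '>' for doors, '=' for sockets, and '*' for executable regular
 * files.
 */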
716 char
717 mdb_vtype2chr(vtype_t type, mode_t mode)
719 static const char vttab[] = {
720 ' ', /* VNON */
721 ' ', /* VREG */
722 '/', /* VDIR */
723 ' ', /* VBLK */
724 ' ', /* VCHR */
725 '@', /* VLNK */
726 '|', /* VFIFO */
727 '>', /* VDOOR */
728 ' ', /* VPROC */
729 '=', /* VSOCK */
730 ' ', /* VBAD */
733 if (type < 0 || type >= sizeof (vttab) / sizeof (vttab[0]))
734 return ('?');
736 if (type == VREG && (mode & 0111) != 0)
737 return ('*');
739 return (vttab[type]);
742 struct pfn2page {
743 pfn_t pfn;
744 page_t *pp;
747 /*ARGSUSED*/
748 static int
749 pfn2page_cb(uintptr_t addr, const struct memseg *msp, void *data)
751 struct pfn2page *p = data;
753 if (p->pfn >= msp->pages_base && p->pfn < msp->pages_end) {
754 p->pp = msp->pages + (p->pfn - msp->pages_base);
755 return (WALK_DONE);
758 return (WALK_NEXT);
761 uintptr_t
762 mdb_pfn2page(pfn_t pfn)
764 struct pfn2page arg;
765 struct page page;
767 arg.pfn = pfn;
768 arg.pp = NULL;
770 if (mdb_walk("memseg", (mdb_walk_cb_t)pfn2page_cb, &arg) == -1) {
771 mdb_warn("pfn2page: can't walk memsegs");
772 return (0);
774 if (arg.pp == NULL) {
775 mdb_warn("pfn2page: unable to find page_t for pfn %lx\n",
776 pfn);
777 return (0);
780 if (mdb_vread(&page, sizeof (page_t), (uintptr_t)arg.pp) == -1) {
781 mdb_warn("pfn2page: can't read page 0x%lx at %p", pfn, arg.pp);
782 return (0);
784 if (page.p_pagenum != pfn) {
785 mdb_warn("pfn2page: page_t 0x%p should have PFN 0x%lx, "
786 "but actually has 0x%lx\n", arg.pp, pfn, page.p_pagenum);
787 return (0);
790 return ((uintptr_t)arg.pp);
793 pfn_t
794 mdb_page2pfn(uintptr_t addr)
796 struct page page;
798 if (mdb_vread(&page, sizeof (page_t), addr) == -1) {
799 mdb_warn("pp2pfn: can't read page at %p", addr);
800 return ((pfn_t)(-1));
803 return (page.p_pagenum);
806 static int
807 a2m_walk_modctl(uintptr_t addr, const struct modctl *m, a2m_query_t *a2m)
809 struct module mod;
811 if (m->mod_mp == NULL)
812 return (0);
814 if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
815 mdb_warn("couldn't read modctl %p's module", addr);
816 return (0);
819 if (a2m->a2m_addr >= (uintptr_t)mod.text &&
820 a2m->a2m_addr < (uintptr_t)mod.text + mod.text_size)
821 goto found;
823 if (a2m->a2m_addr >= (uintptr_t)mod.data &&
824 a2m->a2m_addr < (uintptr_t)mod.data + mod.data_size)
825 goto found;
827 return (0);
829 found:
830 a2m->a2m_where = addr;
831 return (-1);
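/*
 * Given a kernel virtual address, walk the modctl list and return the
 * address of the modctl whose module text or data segment contains that
 * address, or 0 if no loaded module matches.
 */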
834 uintptr_t
835 mdb_addr2modctl(uintptr_t addr)
837 a2m_query_t a2m;
839 a2m.a2m_addr = addr;
840 a2m.a2m_where = NULL;
842 (void) mdb_walk("modctl", (mdb_walk_cb_t)a2m_walk_modctl, &a2m);
843 return (a2m.a2m_where);
846 static mdb_qinfo_t *
847 qi_lookup(uintptr_t qinit_addr)
849 mdb_qinfo_t *qip;
851 for (qip = qi_head; qip != NULL; qip = qip->qi_next) {
852 if (qip->qi_addr == qinit_addr)
853 return (qip);
856 return (NULL);
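/*
 * Register (or update) the qops vector associated with a STREAMS qinit
 * structure.  A driver's dmod would typically do this from its _mdb_init()
 * once it has resolved its qinit symbol (symbol and ops names below are
 * illustrative):
 *
 *	GElf_Sym sym;
 *
 *	if (mdb_lookup_by_name("xx_winit", &sym) == 0)
 *		mdb_qops_install(&xx_qops, (uintptr_t)sym.st_value);
 */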
859 void
860 mdb_qops_install(const mdb_qops_t *qops, uintptr_t qinit_addr)
862 mdb_qinfo_t *qip = qi_lookup(qinit_addr);
864 if (qip != NULL) {
865 qip->qi_ops = qops;
866 return;
869 qip = mdb_alloc(sizeof (mdb_qinfo_t), UM_SLEEP);
871 qip->qi_ops = qops;
872 qip->qi_addr = qinit_addr;
873 qip->qi_next = qi_head;
875 qi_head = qip;
878 void
879 mdb_qops_remove(const mdb_qops_t *qops, uintptr_t qinit_addr)
881 mdb_qinfo_t *qip, *p = NULL;
883 for (qip = qi_head; qip != NULL; p = qip, qip = qip->qi_next) {
884 if (qip->qi_addr == qinit_addr && qip->qi_ops == qops) {
885 if (qi_head == qip)
886 qi_head = qip->qi_next;
887 else
888 p->qi_next = qip->qi_next;
889 mdb_free(qip, sizeof (mdb_qinfo_t));
890 return;
895 char *
896 mdb_qname(const queue_t *q, char *buf, size_t nbytes)
898 struct module_info mi;
899 struct qinit qi;
901 if (mdb_vread(&qi, sizeof (qi), (uintptr_t)q->q_qinfo) == -1) {
902 mdb_warn("failed to read qinit at %p", q->q_qinfo);
903 goto err;
906 if (mdb_vread(&mi, sizeof (mi), (uintptr_t)qi.qi_minfo) == -1) {
907 mdb_warn("failed to read module_info at %p", qi.qi_minfo);
908 goto err;
911 if (mdb_readstr(buf, nbytes, (uintptr_t)mi.mi_idname) <= 0) {
912 mdb_warn("failed to read mi_idname at %p", mi.mi_idname);
913 goto err;
916 return (buf);
918 err:
919 (void) mdb_snprintf(buf, nbytes, "???");
920 return (buf);
923 void
924 mdb_qinfo(const queue_t *q, char *buf, size_t nbytes)
926 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
927 buf[0] = '\0';
929 if (qip != NULL)
930 qip->qi_ops->q_info(q, buf, nbytes);
933 uintptr_t
934 mdb_qrnext(const queue_t *q)
936 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
938 if (qip != NULL)
939 return (qip->qi_ops->q_rnext(q));
941 return (NULL);
944 uintptr_t
945 mdb_qwnext(const queue_t *q)
947 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
949 if (qip != NULL)
950 return (qip->qi_ops->q_wnext(q));
952 return (NULL);
955 uintptr_t
956 mdb_qrnext_default(const queue_t *q)
958 return ((uintptr_t)q->q_next);
961 uintptr_t
962 mdb_qwnext_default(const queue_t *q)
964 return ((uintptr_t)q->q_next);
968 * The following three routines borrowed from modsubr.c
970 static int
971 nm_hash(const char *name)
973 char c;
974 int hash = 0;
976 for (c = *name++; c; c = *name++)
977 hash ^= c;
979 return (hash & MOD_BIND_HASHMASK);
982 static uintptr_t
983 find_mbind(const char *name, uintptr_t *hashtab)
985 int hashndx;
986 uintptr_t mb;
987 struct bind mb_local;
988 char node_name[MAXPATHLEN + 1];
990 hashndx = nm_hash(name);
991 mb = hashtab[hashndx];
992 while (mb) {
993 if (mdb_vread(&mb_local, sizeof (mb_local), mb) == -1) {
994 mdb_warn("failed to read struct bind at %p", mb);
995 return (NULL);
997 if (mdb_readstr(node_name, sizeof (node_name),
998 (uintptr_t)mb_local.b_name) == -1) {
999 mdb_warn("failed to read node name string at %p",
1000 mb_local.b_name);
1001 return (NULL);
1004 if (strcmp(name, node_name) == 0)
1005 break;
1007 mb = (uintptr_t)mb_local.b_next;
1009 return (mb);
1013 mdb_name_to_major(const char *name, major_t *major)
1015 uintptr_t mbind;
1016 uintptr_t mb_hashtab[MOD_BIND_HASHSIZE];
1017 struct bind mbind_local;
1020 if (mdb_readsym(mb_hashtab, sizeof (mb_hashtab), "mb_hashtab") == -1) {
1021 mdb_warn("failed to read symbol 'mb_hashtab'");
1022 return (-1);
1025 if ((mbind = find_mbind(name, mb_hashtab)) != NULL) {
1026 if (mdb_vread(&mbind_local, sizeof (mbind_local), mbind) ==
1027 -1) {
1028 mdb_warn("failed to read mbind struct at %p", mbind);
1029 return (-1);
1032 *major = (major_t)mbind_local.b_num;
1033 return (0);
1035 return (-1);
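/*
 * Translate a major number back to its driver name via devnamesp; this is
 * the inverse of mdb_name_to_major() above.  For example (driver name is
 * illustrative):
 *
 *	major_t maj;
 *	const char *name;
 *
 *	if (mdb_name_to_major("xx", &maj) == 0 &&
 *	    (name = mdb_major_to_name(maj)) != NULL)
 *		mdb_printf("%s is major %d\n", name, maj);
 */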
1038 const char *
1039 mdb_major_to_name(major_t major)
1041 static char name[MODMAXNAMELEN + 1];
1043 uintptr_t devnamesp;
1044 struct devnames dn;
1045 uint_t devcnt;
1047 if (mdb_readvar(&devcnt, "devcnt") == -1 || major >= devcnt ||
1048 mdb_readvar(&devnamesp, "devnamesp") == -1)
1049 return (NULL);
1051 if (mdb_vread(&dn, sizeof (struct devnames), devnamesp +
1052 major * sizeof (struct devnames)) != sizeof (struct devnames))
1053 return (NULL);
1055 if (mdb_readstr(name, MODMAXNAMELEN + 1, (uintptr_t)dn.dn_name) == -1)
1056 return (NULL);
1058 return ((const char *)name);
1062 * Return the name of the driver attached to the dip in drivername.
1065 mdb_devinfo2driver(uintptr_t dip_addr, char *drivername, size_t namebufsize)
1067 struct dev_info devinfo;
1068 char bind_name[MAXPATHLEN + 1];
1069 major_t major;
1070 const char *namestr;
1073 if (mdb_vread(&devinfo, sizeof (devinfo), dip_addr) == -1) {
1074 mdb_warn("failed to read devinfo at %p", dip_addr);
1075 return (-1);
1078 if (mdb_readstr(bind_name, sizeof (bind_name),
1079 (uintptr_t)devinfo.devi_binding_name) == -1) {
1080 mdb_warn("failed to read binding name at %p",
1081 devinfo.devi_binding_name);
1082 return (-1);
1086 * Many->one relation: various names to one major number
1088 if (mdb_name_to_major(bind_name, &major) == -1) {
1089 mdb_warn("failed to translate bind name to major number\n");
1090 return (-1);
1094 * One->one relation: one major number corresponds to one driver
1096 if ((namestr = mdb_major_to_name(major)) == NULL) {
1097 (void) strncpy(drivername, "???", namebufsize);
1098 return (-1);
1101 (void) strncpy(drivername, namestr, namebufsize);
1102 return (0);
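/*
 * For example, a dcmd that is handed a dev_info pointer could report the
 * bound driver like this (buffer name is illustrative):
 *
 *	char drv[MODMAXNAMELEN + 1];
 *
 *	if (mdb_devinfo2driver(addr, drv, sizeof (drv)) == 0)
 *		mdb_printf("dip %p is bound to %s\n", addr, drv);
 */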
1106 * Find the soft state of the driver attached to this dip (if any), given:
1107 * - the address of a dip (in core)
1108 * - the NAME of the global pointer to the driver's i_ddi_soft_state struct
1109 * - a pointer to receive the address of that instance's soft state
1112 mdb_devinfo2statep(uintptr_t dip_addr, char *soft_statep_name,
1113 uintptr_t *statep)
1115 struct dev_info dev_info;
1118 if (mdb_vread(&dev_info, sizeof (dev_info), dip_addr) == -1) {
1119 mdb_warn("failed to read devinfo at %p", dip_addr);
1120 return (-1);
1123 return (mdb_get_soft_state_byname(soft_statep_name,
1124 dev_info.devi_instance, statep, NULL, 0));
1128 * Returns a pointer to the top of the soft state struct for the instance
1129 * specified (in state_addr), given the address of the global soft state
1130 * pointer and size of the struct. Also fills in the buffer pointed to by
1131 * state_buf_p (if non-NULL) with the contents of the state struct.
1134 mdb_get_soft_state_byaddr(uintptr_t ssaddr, uint_t instance,
1135 uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
1137 struct i_ddi_soft_state ss;
1138 void *statep;
1141 if (mdb_vread(&ss, sizeof (ss), ssaddr) == -1)
1142 return (-1);
1144 if (instance >= ss.n_items)
1145 return (-1);
1147 if (mdb_vread(&statep, sizeof (statep), (uintptr_t)ss.array +
1148 (sizeof (statep) * instance)) == -1)
1149 return (-1);
1151 if (state_addr != NULL)
1152 *state_addr = (uintptr_t)statep;
1154 if (statep == NULL) {
1155 errno = ENOENT;
1156 return (-1);
1159 if (state_buf_p != NULL) {
1161 /* Read the state struct into the buffer in local space. */
1162 if (mdb_vread(state_buf_p, sizeof_state,
1163 (uintptr_t)statep) == -1)
1164 return (-1);
1167 return (0);
1172 * Returns a pointer to the top of the soft state struct for the instance
1173 * specified (in state_addr), given the name of the global soft state pointer
1174 * and size of the struct. Also fills in the buffer pointed to by
1175 * state_buf_p (if non-NULL) with the contents of the state struct.
1178 mdb_get_soft_state_byname(char *softstatep_name, uint_t instance,
1179 uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
1181 uintptr_t ssaddr;
1183 if (mdb_readvar((void *)&ssaddr, softstatep_name) == -1)
1184 return (-1);
1186 return (mdb_get_soft_state_byaddr(ssaddr, instance, state_addr,
1187 state_buf_p, sizeof_state));
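/*
 * For example, a driver dmod can locate and copy in the soft state for a
 * given instance as follows (the state name and structure are hypothetical):
 *
 *	uintptr_t statep;
 *	struct xx_state xs;
 *
 *	if (mdb_get_soft_state_byname("xx_statep", instance, &statep,
 *	    &xs, sizeof (xs)) == 0)
 *		mdb_printf("instance %d soft state at %p\n", instance, statep);
 */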
1190 static const mdb_dcmd_t dcmds[] = {
1191 { "dnlc", NULL, "print DNLC contents", dnlcdump },
1192 { NULL }
1195 static const mdb_modinfo_t modinfo = { MDB_API_VERSION, dcmds };
1197 /*ARGSUSED*/
1198 static void
1199 update_vars(void *arg)
1201 GElf_Sym sym;
1203 if (mdb_lookup_by_name("auto_vnodeops", &sym) == 0)
1204 autofs_vnops_ptr = (struct vnodeops *)(uintptr_t)sym.st_value;
1205 else
1206 autofs_vnops_ptr = NULL;
1208 (void) mdb_readvar(&_mdb_ks_pagesize, "_pagesize");
1209 (void) mdb_readvar(&_mdb_ks_pageshift, "_pageshift");
1210 (void) mdb_readvar(&_mdb_ks_pageoffset, "_pageoffset");
1211 (void) mdb_readvar(&_mdb_ks_pagemask, "_pagemask");
1212 (void) mdb_readvar(&_mdb_ks_mmu_pagesize, "_mmu_pagesize");
1213 (void) mdb_readvar(&_mdb_ks_mmu_pageshift, "_mmu_pageshift");
1214 (void) mdb_readvar(&_mdb_ks_mmu_pageoffset, "_mmu_pageoffset");
1215 (void) mdb_readvar(&_mdb_ks_mmu_pagemask, "_mmu_pagemask");
1216 (void) mdb_readvar(&_mdb_ks_kernelbase, "_kernelbase");
1218 (void) mdb_readvar(&_mdb_ks_userlimit, "_userlimit");
1219 (void) mdb_readvar(&_mdb_ks_userlimit32, "_userlimit32");
1220 (void) mdb_readvar(&_mdb_ks_argsbase, "_argsbase");
1221 (void) mdb_readvar(&_mdb_ks_msg_bsize, "_msg_bsize");
1222 (void) mdb_readvar(&_mdb_ks_defaultstksz, "_defaultstksz");
1223 (void) mdb_readvar(&_mdb_ks_ncpu, "_ncpu");
1224 (void) mdb_readvar(&_mdb_ks_ncpu_log2, "_ncpu_log2");
1225 (void) mdb_readvar(&_mdb_ks_ncpu_p2, "_ncpu_p2");
1227 page_hash_loaded = 0; /* invalidate cached page_hash state */
1230 const mdb_modinfo_t *
1231 _mdb_init(void)
1234 * When used with mdb, mdb_ks is a separate dmod. With kmdb, however,
1235 * mdb_ks is compiled into the debugger module. kmdb cannot
1236 * automatically modunload itself when it exits. If it restarts after a
1237 * debugger fault, static variables may not be initialized to zero.
1238 * They must be manually reinitialized here.
1240 dnlc_hash = NULL;
1241 qi_head = NULL;
1243 mdb_callback_add(MDB_CALLBACK_STCHG, update_vars, NULL);
1245 update_vars(NULL);
1247 return (&modinfo);
1250 void
1251 _mdb_fini(void)
1253 dnlc_free();
1254 while (qi_head != NULL) {
1255 mdb_qinfo_t *qip = qi_head;
1256 qi_head = qip->qi_next;
1257 mdb_free(qip, sizeof (mdb_qinfo_t));
1262 * Interface between MDB kproc target and mdb_ks. The kproc target relies
1263 * on looking up and invoking these functions in mdb_ks so that dependencies
1264 * on the current kernel implementation are isolated in mdb_ks.
1268 * Given the address of a proc_t, return the p.p_as pointer; return NULL
1269 * if we were unable to read a proc structure from the given address.
1271 uintptr_t
1272 mdb_kproc_as(uintptr_t proc_addr)
1274 proc_t p;
1276 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p))
1277 return ((uintptr_t)p.p_as);
1279 return (NULL);
1283 * Given the address of a proc_t, return the p.p_model value; return
1284 * PR_MODEL_UNKNOWN if we were unable to read a proc structure or if
1285 * the model value does not match one of the two known values.
1287 uint_t
1288 mdb_kproc_model(uintptr_t proc_addr)
1290 proc_t p;
1292 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p)) {
1293 switch (p.p_model) {
1294 case DATAMODEL_ILP32:
1295 return (PR_MODEL_ILP32);
1296 case DATAMODEL_LP64:
1297 return (PR_MODEL_LP64);
1301 return (PR_MODEL_UNKNOWN);
1305 * Callback function for walking process's segment list. For each segment,
1306 * we fill in an mdb_map_t describing its properties, and then invoke
1307 * the callback function provided by the kproc target.
1309 static int
1310 asmap_step(uintptr_t addr, const struct seg *seg, asmap_arg_t *asmp)
1312 struct segvn_data svd;
1313 mdb_map_t map;
1315 if (seg->s_ops == asmp->asm_segvn_ops && mdb_vread(&svd,
1316 sizeof (svd), (uintptr_t)seg->s_data) == sizeof (svd)) {
1318 if (svd.vp != NULL) {
1319 if (mdb_vnode2path((uintptr_t)svd.vp, map.map_name,
1320 MDB_TGT_MAPSZ) != 0) {
1321 (void) mdb_snprintf(map.map_name,
1322 MDB_TGT_MAPSZ, "[ vnode %p ]", svd.vp);
1324 } else
1325 (void) strcpy(map.map_name, "[ anon ]");
1327 } else {
1328 (void) mdb_snprintf(map.map_name, MDB_TGT_MAPSZ,
1329 "[ seg %p ]", addr);
1332 map.map_base = (uintptr_t)seg->s_base;
1333 map.map_size = seg->s_size;
1334 map.map_flags = 0;
1336 asmp->asm_callback((const struct mdb_map *)&map, asmp->asm_cbdata);
1337 return (WALK_NEXT);
1341 * Given a process address space, walk its segment list using the seg walker,
1342 * convert the segment data to an mdb_map_t, and pass this information
1343 * back to the kproc target via the given callback function.
1346 mdb_kproc_asiter(uintptr_t as,
1347 void (*func)(const struct mdb_map *, void *), void *p)
1349 asmap_arg_t arg;
1350 GElf_Sym sym;
1352 arg.asm_segvn_ops = NULL;
1353 arg.asm_callback = func;
1354 arg.asm_cbdata = p;
1356 if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
1357 arg.asm_segvn_ops = (struct seg_ops *)(uintptr_t)sym.st_value;
1359 return (mdb_pwalk("seg", (mdb_walk_cb_t)asmap_step, &arg, as));
1363 * Copy the auxv array from the given process's u-area into the provided
1364 * buffer. If the buffer is NULL, only return the size of the auxv array
1365 * so the caller knows how much space will be required.
1368 mdb_kproc_auxv(uintptr_t proc, auxv_t *auxv)
1370 if (auxv != NULL) {
1371 proc_t p;
1373 if (mdb_vread(&p, sizeof (p), proc) != sizeof (p))
1374 return (-1);
1376 bcopy(p.p_user.u_auxv, auxv,
1377 sizeof (auxv_t) * __KERN_NAUXV_IMPL);
1380 return (__KERN_NAUXV_IMPL);
1384 * Given a process address, return the PID.
1386 pid_t
1387 mdb_kproc_pid(uintptr_t proc_addr)
1389 struct pid pid;
1390 proc_t p;
1392 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p) &&
1393 mdb_vread(&pid, sizeof (pid), (uintptr_t)p.p_pidp) == sizeof (pid))
1394 return (pid.pid_id);
1396 return (-1);
1400 * Interface between the MDB kvm target and mdb_ks. The kvm target relies
1401 * on looking up and invoking these functions in mdb_ks so that dependencies
1402 * on the current kernel implementation are isolated in mdb_ks.
1406 * Determine whether or not the thread that panicked the given kernel was a
1407 * kernel thread (panic_thread->t_procp == &p0).
1409 void
1410 mdb_dump_print_content(dumphdr_t *dh, pid_t content)
1412 GElf_Sym sym;
1413 uintptr_t pt;
1414 uintptr_t procp;
1415 int expcont = 0;
1416 int actcont;
1418 (void) mdb_readvar(&expcont, "dump_conflags");
1419 actcont = dh->dump_flags & DF_CONTENT;
1421 if (actcont == DF_ALL) {
1422 mdb_printf("dump content: all kernel and user pages\n");
1423 return;
1424 } else if (actcont == DF_CURPROC) {
1425 mdb_printf("dump content: kernel pages and pages from "
1426 "PID %d", content);
1427 return;
1430 mdb_printf("dump content: kernel pages only\n");
1431 if (!(expcont & DF_CURPROC))
1432 return;
1434 if (mdb_readvar(&pt, "panic_thread") != sizeof (pt) || pt == NULL)
1435 goto kthreadpanic_err;
1437 if (mdb_vread(&procp, sizeof (procp), pt + OFFSETOF(kthread_t,
1438 t_procp)) == -1 || procp == NULL)
1439 goto kthreadpanic_err;
1441 if (mdb_lookup_by_name("p0", &sym) != 0)
1442 goto kthreadpanic_err;
1444 if (procp == (uintptr_t)sym.st_value) {
1445 mdb_printf(" (curproc requested, but a kernel thread "
1446 "panicked)\n");
1447 } else {
1448 mdb_printf(" (curproc requested, but the process that "
1449 "panicked could not be dumped)\n");
1452 return;
1454 kthreadpanic_err:
1455 mdb_printf(" (curproc requested, but the process that panicked could "
1456 "not be found)\n");
1460 * Determine the process that was saved in a `curproc' dump. This process will
1461 * be recorded as the first element in dump_pids[].
1464 mdb_dump_find_curproc(void)
1466 uintptr_t pidp;
1467 pid_t pid = -1;
1469 if (mdb_readvar(&pidp, "dump_pids") == sizeof (pidp) &&
1470 mdb_vread(&pid, sizeof (pid), pidp) == sizeof (pid) &&
1471 pid > 0)
1472 return (pid);
1473 else
1474 return (-1);
1479 * Following three funcs extracted from sunddi.c
1483 * Return core address of root node of devinfo tree
1485 static uintptr_t
1486 mdb_ddi_root_node(void)
1488 uintptr_t top_devinfo_addr;
1490 /* return (top_devinfo); */
1491 if (mdb_readvar(&top_devinfo_addr, "top_devinfo") == -1) {
1492 mdb_warn("failed to read top_devinfo");
1493 return (NULL);
1495 return (top_devinfo_addr);
1499 * Return the name of the devinfo node pointed at by 'dip_addr' in the buffer
1500 * pointed at by 'name.'
1502 * - dip_addr is a pointer to a dev_info struct in core.
1504 static char *
1505 mdb_ddi_deviname(uintptr_t dip_addr, char *name, size_t name_size)
1507 uintptr_t addrname;
1508 ssize_t length;
1509 char *local_namep = name;
1510 size_t local_name_size = name_size;
1511 struct dev_info local_dip;
1514 if (dip_addr == mdb_ddi_root_node()) {
1515 if (name_size < 1) {
1516 mdb_warn("failed to get node name: buf too small\n");
1517 return (NULL);
1520 *name = '\0';
1521 return (name);
1524 if (name_size < 2) {
1525 mdb_warn("failed to get node name: buf too small\n");
1526 return (NULL);
1529 local_namep = name;
1530 *local_namep++ = '/';
1531 *local_namep = '\0';
1532 local_name_size--;
1534 if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
1535 mdb_warn("failed to read devinfo struct");
1538 length = mdb_readstr(local_namep, local_name_size,
1539 (uintptr_t)local_dip.devi_node_name);
1540 if (length == -1) {
1541 mdb_warn("failed to read node name");
1542 return (NULL);
1544 local_namep += length;
1545 local_name_size -= length;
1546 addrname = (uintptr_t)local_dip.devi_addr;
1548 if (addrname != NULL) {
1550 if (local_name_size < 2) {
1551 mdb_warn("not enough room for node address string");
1552 return (name);
1554 *local_namep++ = '@';
1555 *local_namep = '\0';
1556 local_name_size--;
1558 length = mdb_readstr(local_namep, local_name_size, addrname);
1559 if (length == -1) {
1560 mdb_warn("failed to read name");
1561 return (NULL);
1565 return (name);
1569 * Generate the full path under the /devices dir to the device entry.
1571 * dip is a pointer to a devinfo struct in core (not in local memory).
1573 char *
1574 mdb_ddi_pathname(uintptr_t dip_addr, char *path, size_t pathlen)
1576 struct dev_info local_dip;
1577 uintptr_t parent_dip;
1578 char *bp;
1579 size_t buf_left;
1582 if (dip_addr == mdb_ddi_root_node()) {
1583 *path = '\0';
1584 return (path);
1588 if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
1589 mdb_warn("failed to read devinfo struct");
1592 parent_dip = (uintptr_t)local_dip.devi_parent;
1593 (void) mdb_ddi_pathname(parent_dip, path, pathlen);
1595 bp = path + strlen(path);
1596 buf_left = pathlen - strlen(path);
1597 (void) mdb_ddi_deviname(dip_addr, bp, buf_left);
1598 return (path);
1603 * Read in the string value of a refstr, which is appended to the end of
1604 * the structure.
1606 ssize_t
1607 mdb_read_refstr(uintptr_t refstr_addr, char *str, size_t nbytes)
1609 struct refstr *r = (struct refstr *)refstr_addr;
1611 return (mdb_readstr(str, nbytes, (uintptr_t)r->rs_string));
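/*
 * For example, a vfs_t's mount point (a refstr_t pointer) can be printed
 * with:
 *
 *	char mntpt[MAXPATHLEN];
 *
 *	if (mdb_read_refstr((uintptr_t)vfs.vfs_mntpt, mntpt,
 *	    sizeof (mntpt)) > 0)
 *		mdb_printf("%s\n", mntpt);
 */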
1615 * Chase an mblk list by b_next and return the length.
1618 mdb_mblk_count(const mblk_t *mb)
1620 int count;
1621 mblk_t mblk;
1623 if (mb == NULL)
1624 return (0);
1626 count = 1;
1627 while (mb->b_next != NULL) {
1628 count++;
1629 if (mdb_vread(&mblk, sizeof (mblk), (uintptr_t)mb->b_next) ==
1631 break;
1632 mb = &mblk;
1634 return (count);
1638 * Write the given MAC address as a printable string in the usual colon-
1639 * separated format. Assumes that buflen is at least 2.
1641 void
1642 mdb_mac_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen)
1644 int slen;
1646 if (alen == 0 || buflen < 4) {
1647 (void) strcpy(buf, "?");
1648 return;
1650 for (;;) {
1652 * If there are more MAC address bytes available, but we won't
1653 * have any room to print them, then add "..." to the string
1654 * instead. See below for the 'magic number' explanation.
1656 if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) {
1657 (void) strcpy(buf, "...");
1658 break;
1660 slen = mdb_snprintf(buf, buflen, "%02x", *addr++);
1661 buf += slen;
1662 if (--alen == 0)
1663 break;
1664 *buf++ = ':';
1665 buflen -= slen + 1;
1667 * At this point, based on the first 'if' statement above,
1668 * either alen == 1 and buflen >= 3, or alen > 1 and
1669 * buflen >= 4. The first case leaves room for the final "xx"
1670 * number and trailing NUL byte. The second leaves room for at
1671 * least "...". Thus the apparently 'magic' numbers chosen for
1672 * that statement.
1678 * Produce a string that represents a DLPI primitive, or NULL if no such string
1679 * is possible.
1681 const char *
1682 mdb_dlpi_prim(int prim)
1684 switch (prim) {
1685 case DL_INFO_REQ: return ("DL_INFO_REQ");
1686 case DL_INFO_ACK: return ("DL_INFO_ACK");
1687 case DL_ATTACH_REQ: return ("DL_ATTACH_REQ");
1688 case DL_DETACH_REQ: return ("DL_DETACH_REQ");
1689 case DL_BIND_REQ: return ("DL_BIND_REQ");
1690 case DL_BIND_ACK: return ("DL_BIND_ACK");
1691 case DL_UNBIND_REQ: return ("DL_UNBIND_REQ");
1692 case DL_OK_ACK: return ("DL_OK_ACK");
1693 case DL_ERROR_ACK: return ("DL_ERROR_ACK");
1694 case DL_ENABMULTI_REQ: return ("DL_ENABMULTI_REQ");
1695 case DL_DISABMULTI_REQ: return ("DL_DISABMULTI_REQ");
1696 case DL_PROMISCON_REQ: return ("DL_PROMISCON_REQ");
1697 case DL_PROMISCOFF_REQ: return ("DL_PROMISCOFF_REQ");
1698 case DL_UNITDATA_REQ: return ("DL_UNITDATA_REQ");
1699 case DL_UNITDATA_IND: return ("DL_UNITDATA_IND");
1700 case DL_UDERROR_IND: return ("DL_UDERROR_IND");
1701 case DL_PHYS_ADDR_REQ: return ("DL_PHYS_ADDR_REQ");
1702 case DL_PHYS_ADDR_ACK: return ("DL_PHYS_ADDR_ACK");
1703 case DL_SET_PHYS_ADDR_REQ: return ("DL_SET_PHYS_ADDR_REQ");
1704 case DL_NOTIFY_REQ: return ("DL_NOTIFY_REQ");
1705 case DL_NOTIFY_ACK: return ("DL_NOTIFY_ACK");
1706 case DL_NOTIFY_IND: return ("DL_NOTIFY_IND");
1707 case DL_NOTIFY_CONF: return ("DL_NOTIFY_CONF");
1708 case DL_CAPABILITY_REQ: return ("DL_CAPABILITY_REQ");
1709 case DL_CAPABILITY_ACK: return ("DL_CAPABILITY_ACK");
1710 case DL_CONTROL_REQ: return ("DL_CONTROL_REQ");
1711 case DL_CONTROL_ACK: return ("DL_CONTROL_ACK");
1712 case DL_PASSIVE_REQ: return ("DL_PASSIVE_REQ");
1713 default: return (NULL);
1718 * mdb_gethrtime() returns the hires system time: the timestamp at which we
1719 * dropped into the debugger if called from kmdb(1); the core dump's hires
1720 * time if inspecting one; or the running system's hires time if we're
1721 * inspecting a live kernel.
1723 hrtime_t
1724 mdb_gethrtime(void)
1726 uintptr_t ptr;
1727 GElf_Sym sym;
1728 lbolt_info_t lbi;
1729 hrtime_t ts;
1732 * We first check whether the lbolt info structure has been allocated
1733 * and initialized. If not, lbolt_hybrid will be pointing at
1734 * lbolt_bootstrap.
1736 if (mdb_lookup_by_name("lbolt_bootstrap", &sym) == -1)
1737 return (0);
1739 if (mdb_readvar(&ptr, "lbolt_hybrid") == -1)
1740 return (0);
1742 if (ptr == (uintptr_t)sym.st_value)
1743 return (0);
1745 #ifdef _KMDB
1746 if (mdb_readvar(&ptr, "lb_info") == -1)
1747 return (0);
1749 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1750 sizeof (lbolt_info_t))
1751 return (0);
1753 ts = lbi.lbi_debug_ts;
1754 #else
1755 if (mdb_prop_postmortem) {
1756 if (mdb_readvar(&ptr, "lb_info") == -1)
1757 return (0);
1759 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1760 sizeof (lbolt_info_t))
1761 return (0);
1763 ts = lbi.lbi_debug_ts;
1764 } else {
1765 ts = gethrtime();
1767 #endif
1768 return (ts);
1772 * mdb_get_lbolt() returns the number of clock ticks since system boot.
1773 * Depending on the context in which it's called, the value will be derived
1774 * from different sources per mdb_gethrtime(). If inspecting a panicked
1775 * system, the routine returns the 'panic_lbolt64' variable from the core file.
1777 int64_t
1778 mdb_get_lbolt(void)
1780 lbolt_info_t lbi;
1781 uintptr_t ptr;
1782 int64_t pl;
1783 hrtime_t ts;
1784 int nsec;
1786 if (mdb_readvar(&pl, "panic_lbolt64") != -1 && pl > 0)
1787 return (pl);
1790 * mdb_gethrtime() will return zero if the lbolt info structure hasn't
1791 * been allocated and initialized yet, or if it fails to read it.
1793 if ((ts = mdb_gethrtime()) <= 0)
1794 return (0);
1797 * Load the time spent in kmdb, if any.
1799 if (mdb_readvar(&ptr, "lb_info") == -1)
1800 return (0);
1802 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1803 sizeof (lbolt_info_t))
1804 return (0);
1806 if (mdb_readvar(&nsec, "nsec_per_tick") == -1 || nsec == 0) {
1807 mdb_warn("failed to read 'nsec_per_tick'");
1808 return (-1);
1811 return ((ts/nsec) - lbi.lbi_debug_time);
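/*
 * For example, a dcmd can age-stamp an event recorded in lbolt ticks
 * (the field name is illustrative):
 *
 *	int64_t ago = mdb_get_lbolt() - ent.e_timestamp;
 *
 *	mdb_printf("recorded %lld ticks ago\n", ago);
 */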