9452 ptable_dcmd() needs a little cleanup
usr/src/cmd/mdb/i86pc/modules/unix/i86mmu.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */

/*
 * This part of the file contains the mdb support for dcmds:
 *	::memseg_list
 * and walkers for:
 *	memseg - a memseg list walker for ::memseg_list
 */

#include <sys/types.h>
#include <sys/machparam.h>
#include <sys/controlregs.h>
#include <sys/mach_mmu.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/as.h>

#include <mdb/mdb_modapi.h>
#include <mdb/mdb_target.h>

#include <vm/page.h>
#include <vm/hat_i86.h>

#define VA_SIGN_BIT (1UL << 47)
#define VA_SIGN_EXTEND(va) (((va) ^ VA_SIGN_BIT) - VA_SIGN_BIT)
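
/*
 * VA_SIGN_EXTEND() canonicalizes a 48-bit virtual address by propagating
 * bit 47 into the upper 16 bits.  For example, 0x0000800000000000 (bit 47
 * set) becomes 0xffff800000000000, while an address with bit 47 clear is
 * returned unchanged.
 */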

struct pfn2pp {
        pfn_t pfn;
        page_t *pp;
};

static int do_va2pa(uintptr_t, struct as *, int, physaddr_t *, pfn_t *);
static void init_mmu(void);

int
platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
{
        if (asp == NULL)
                return (DCMD_ERR);

        init_mmu();

        if (mmu.num_level == 0)
                return (DCMD_ERR);

        return (do_va2pa(addr, asp, 0, pap, NULL));
}

/*
 * ::memseg_list dcmd and walker to implement it.
 */
/*ARGSUSED*/
int
memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        struct memseg ms;

        if (!(flags & DCMD_ADDRSPEC)) {
                if (mdb_pwalk_dcmd("memseg", "memseg_list",
                    0, NULL, 0) == -1) {
                        mdb_warn("can't walk memseg");
                        return (DCMD_ERR);
                }
                return (DCMD_OK);
        }

        if (DCMD_HDRSPEC(flags))
                mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
                    "PAGES", "EPAGES", "BASE", "END");

        if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
                mdb_warn("can't read memseg at %#lx", addr);
                return (DCMD_ERR);
        }

        mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
            ms.pages, ms.epages, ms.pages_base, ms.pages_end);

        return (DCMD_OK);
}
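
/*
 * Example (illustrative): running "::memseg_list" with no address walks
 * every memseg via the "memseg" walker and prints one line per segment
 * under the ADDR, PAGES, EPAGES, BASE and END columns; "<addr>::memseg_list"
 * prints just the memseg at that address.
 */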

/*
 * walk the memseg structures
 */
int
memseg_walk_init(mdb_walk_state_t *wsp)
{
        if (wsp->walk_addr != NULL) {
                mdb_warn("memseg only supports global walks\n");
                return (WALK_ERR);
        }

        if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
                mdb_warn("symbol 'memsegs' not found");
                return (WALK_ERR);
        }

        wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
        return (WALK_NEXT);
}

int
memseg_walk_step(mdb_walk_state_t *wsp)
{
        int status;

        if (wsp->walk_addr == 0) {
                return (WALK_DONE);
        }

        if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
            wsp->walk_addr) == -1) {
                mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
                return (WALK_DONE);
        }

        status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
            wsp->walk_cbdata);

        wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);

        return (status);
}

void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
        mdb_free(wsp->walk_data, sizeof (struct memseg));
}
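
/*
 * A minimal sketch of how these three functions would typically be wired
 * into the module's walker table (the real table lives elsewhere in this
 * module; the names below are illustrative, not taken from that table):
 *
 *	static const mdb_walker_t walkers[] = {
 *		{ "memseg", "walk the memseg structures",
 *		    memseg_walk_init, memseg_walk_step, memseg_walk_fini },
 *		...
 *	};
 */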

/*
 * Now HAT related dcmds.
 */

static struct hat *khat;	/* value of kas.a_hat */
struct hat_mmu_info mmu;
uintptr_t kernelbase;

/*
 * stuff for i86xpv images
 */
static int is_xpv;
static uintptr_t mfn_list_addr;	/* kernel MFN list address */
uintptr_t xen_virt_start;	/* address of mfn_to_pfn[] table */
ulong_t mfn_count;		/* number of pfn's in the MFN list */
pfn_t *mfn_list;		/* local MFN list copy */

/*
 * read mmu parameters from kernel
 */
static void
init_mmu(void)
{
        struct as kas;

        if (mmu.num_level != 0)
                return;

        if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
                mdb_warn("Can't use HAT information before mmu_init()\n");
        if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
                mdb_warn("Couldn't find kas - kernel's struct as\n");
        if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
                mdb_warn("Couldn't find kernelbase\n");
        khat = kas.a_hat;

        /*
         * Is this a paravirtualized domain image?
         */
        if (mdb_readsym(&mfn_list_addr, sizeof (mfn_list_addr),
            "mfn_list") == -1 ||
            mdb_readsym(&xen_virt_start, sizeof (xen_virt_start),
            "xen_virt_start") == -1 ||
            mdb_readsym(&mfn_count, sizeof (mfn_count), "mfn_count") == -1) {
                mfn_list_addr = NULL;
        }

        is_xpv = mfn_list_addr != NULL;

#ifndef _KMDB
        /*
         * recreate the local mfn_list
         */
        if (is_xpv) {
                size_t sz = mfn_count * sizeof (pfn_t);
                mfn_list = mdb_zalloc(sz, UM_SLEEP);

                if (mdb_vread(mfn_list, sz, (uintptr_t)mfn_list_addr) == -1) {
                        mdb_warn("Failed to read MFN list\n");
                        mdb_free(mfn_list, sz);
                        mfn_list = NULL;
                }
        }
#endif
}

void
free_mmu(void)
{
#ifdef __xpv
        if (mfn_list != NULL)
                mdb_free(mfn_list, mfn_count * sizeof (mfn_t));
#endif
}

#ifdef __xpv

#ifdef _KMDB

/*
 * Convert between MFNs and PFNs.  Since we're in kmdb we can go directly
 * through the machine to phys mapping and the MFN list.
 */
pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
        pfn_t pfn;
        mfn_t tmp;
        pfn_t *pfn_list;

        if (mfn_list_addr == NULL)
                return (-(pfn_t)1);

        pfn_list = (pfn_t *)xen_virt_start;
        if (mdb_vread(&pfn, sizeof (pfn), (uintptr_t)(pfn_list + mfn)) == -1)
                return (-(pfn_t)1);

        if (mdb_vread(&tmp, sizeof (tmp),
            (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
                return (-(pfn_t)1);

        if (pfn >= mfn_count || tmp != mfn)
                return (-(pfn_t)1);

        return (pfn);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
        mfn_t mfn;

        init_mmu();

        if (mfn_list_addr == NULL || pfn >= mfn_count)
                return (-(mfn_t)1);

        if (mdb_vread(&mfn, sizeof (mfn),
            (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
                return (-(mfn_t)1);

        return (mfn);
}

#else /* _KMDB */

/*
 * Convert between MFNs and PFNs.  Since a crash dump doesn't include the
 * MFN->PFN translation table (it's part of the hypervisor, not our image)
 * we do the MFN->PFN translation by searching the PFN->MFN (mfn_list)
 * table, if it's there.
 */
pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
        pfn_t pfn;

        init_mmu();

        if (mfn_list == NULL)
                return (-(pfn_t)1);

        for (pfn = 0; pfn < mfn_count; ++pfn) {
                if (mfn_list[pfn] != mfn)
                        continue;
                return (pfn);
        }

        return (-(pfn_t)1);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
        init_mmu();

        if (mfn_list == NULL || pfn >= mfn_count)
                return (-(mfn_t)1);

        return (mfn_list[pfn]);
}

#endif /* _KMDB */

static paddr_t
mdb_ma_to_pa(uint64_t ma)
{
        pfn_t pfn = mdb_mfn_to_pfn(mmu_btop(ma));
        if (pfn == -(pfn_t)1)
                return (-(paddr_t)1);

        return (mmu_ptob((paddr_t)pfn) | (ma & (MMU_PAGESIZE - 1)));
}
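
/*
 * For example, given ma = 0x2345678 and 4K pages, mmu_btop(ma) yields
 * machine frame 0x2345; if that MFN maps to, say, PFN 0x1122 (a purely
 * hypothetical value), the result is mmu_ptob(0x1122) | 0x678, i.e. the
 * physical address 0x1122678.
 */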

#else /* __xpv */

#define mdb_ma_to_pa(ma) (ma)
#define mdb_mfn_to_pfn(mfn) (mfn)
#define mdb_pfn_to_mfn(pfn) (pfn)

#endif /* __xpv */

/*
 * ::mfntopfn dcmd translates hypervisor machine page number
 * to physical page number
 */
/*ARGSUSED*/
int
mfntopfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        pfn_t pfn;

        if ((flags & DCMD_ADDRSPEC) == 0) {
                mdb_warn("MFN missing\n");
                return (DCMD_USAGE);
        }

        if ((pfn = mdb_mfn_to_pfn((pfn_t)addr)) == -(pfn_t)1) {
                mdb_warn("Invalid mfn %lr\n", (pfn_t)addr);
                return (DCMD_ERR);
        }

        mdb_printf("%lr\n", pfn);

        return (DCMD_OK);
}

/*
 * ::pfntomfn dcmd translates physical page number to
 * hypervisor machine page number
 */
/*ARGSUSED*/
int
pfntomfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        pfn_t mfn;

        if ((flags & DCMD_ADDRSPEC) == 0) {
                mdb_warn("PFN missing\n");
                return (DCMD_USAGE);
        }

        if ((mfn = mdb_pfn_to_mfn((pfn_t)addr)) == -(pfn_t)1) {
                mdb_warn("Invalid pfn %lr\n", (pfn_t)addr);
                return (DCMD_ABORT);
        }

        mdb_printf("%lr\n", mfn);

        if (flags & DCMD_LOOP)
                mdb_set_dot(addr + 1);
        return (DCMD_OK);
}
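
/*
 * Example usage (illustrative, assuming the dcmds are registered under the
 * same names as their handlers; only meaningful on i86xpv images):
 *
 *	> <pfn>::pfntomfn
 *	> <mfn>::mfntopfn
 *
 * Because pfntomfn_dcmd() advances dot when DCMD_LOOP is set, a range such
 * as "<pfn>,4::pfntomfn" translates four consecutive PFNs.
 */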

static pfn_t
pte2mfn(x86pte_t pte, uint_t level)
{
        pfn_t mfn;
        if (level > 0 && (pte & PT_PAGESIZE))
                mfn = mmu_btop(pte & PT_PADDR_LGPG);
        else
                mfn = mmu_btop(pte & PT_PADDR);
        return (mfn);
}

static int
do_pte_dcmd(int level, uint64_t pte)
{
        static char *attr[] = {
            "wrback", "wrthru", "uncached", "uncached",
            "wrback", "wrthru", "wrcombine", "uncached"};
        int pat_index = 0;
        pfn_t mfn;

        mdb_printf("pte=0x%llr: ", pte);

        mfn = pte2mfn(pte, level);
        mdb_printf("%s=0x%lr ", is_xpv ? "mfn" : "pfn", mfn);

        if (PTE_GET(pte, mmu.pt_nx))
                mdb_printf("noexec ");

        if (PTE_GET(pte, PT_NOCONSIST))
                mdb_printf("noconsist ");

        if (PTE_GET(pte, PT_NOSYNC))
                mdb_printf("nosync ");

        if (PTE_GET(pte, mmu.pt_global))
                mdb_printf("global ");

        if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
                mdb_printf("largepage ");

        if (level > 0 && PTE_GET(pte, PT_MOD))
                mdb_printf("mod ");

        if (level > 0 && PTE_GET(pte, PT_REF))
                mdb_printf("ref ");

        if (PTE_GET(pte, PT_USER))
                mdb_printf("user ");

        if (PTE_GET(pte, PT_WRITABLE))
                mdb_printf("write ");

        /*
         * Report non-standard cacheability
         */
        pat_index = 0;
        if (level > 0) {
                if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
                        pat_index += 4;
        } else {
                if (PTE_GET(pte, PT_PAT_4K))
                        pat_index += 4;
        }

        if (PTE_GET(pte, PT_NOCACHE))
                pat_index += 2;

        if (PTE_GET(pte, PT_WRITETHRU))
                pat_index += 1;

        if (pat_index != 0)
                mdb_printf("%s", attr[pat_index]);

        if (PTE_GET(pte, PT_VALID) == 0)
                mdb_printf(" !VALID ");

        mdb_printf("\n");
        return (DCMD_OK);
}
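
/*
 * The pat_index computed above selects from attr[]: PT_PAT_4K (or
 * PT_PAT_LARGE for large pages) contributes 4, PT_NOCACHE contributes 2,
 * and PT_WRITETHRU contributes 1.  For example, a 4K PTE with PT_NOCACHE
 * and PT_WRITETHRU set but PT_PAT_4K clear yields pat_index == 3, which
 * prints "uncached".
 */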

/*
 * Print a PTE in more human friendly way. The PTE is assumed to be in
 * a level 0 page table, unless -l specifies another level.
 */
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        uint64_t level = 0;

        init_mmu();

        if (mmu.num_level == 0)
                return (DCMD_ERR);

        if ((flags & DCMD_ADDRSPEC) == 0)
                return (DCMD_USAGE);

        if (mdb_getopts(argc, argv,
            'l', MDB_OPT_UINT64, &level, NULL) != argc)
                return (DCMD_USAGE);

        if (level > mmu.max_level) {
                mdb_warn("invalid level %lu\n", level);
                return (DCMD_ERR);
        }

        if (addr == 0)
                return (DCMD_OK);

        return (do_pte_dcmd((int)level, addr));
}
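
/*
 * Example (illustrative, assuming the dcmd is registered as ::pte):
 *
 *	> <pte value>::pte
 *	> <pte value>::pte -l 2
 *
 * The -l option names the page table level the PTE came from, which
 * affects how the large-page and PAT bits are decoded.
 */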

static size_t
va2entry(htable_t *htable, uintptr_t addr)
{
        size_t entry = (addr - htable->ht_vaddr);

        entry >>= mmu.level_shift[htable->ht_level];
        return (entry & HTABLE_NUM_PTES(htable) - 1);
}
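
/*
 * For example, for a level 0 htable with ht_vaddr == 0xfffffe0000400000
 * and 4K pages, addr == 0xfffffe0000403000 yields
 * (addr - ht_vaddr) >> 12 == 3, i.e. the fourth PTE in the table (the mask
 * of HTABLE_NUM_PTES() - 1 keeps the index within the table).
 */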

static x86pte_t
get_pte(hat_t *hat, htable_t *htable, uintptr_t addr)
{
        x86pte_t buf;

        if (htable->ht_flags & HTABLE_COPIED) {
                uintptr_t ptr = (uintptr_t)hat->hat_copied_ptes;
                ptr += va2entry(htable, addr) << mmu.pte_size_shift;
                return (*(x86pte_t *)ptr);
        }

        paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
        paddr += va2entry(htable, addr) << mmu.pte_size_shift;

        if ((mdb_pread(&buf, mmu.pte_size, paddr)) == mmu.pte_size)
                return (buf);

        return (0);
}

static int
do_va2pa(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap,
    pfn_t *mfnp)
{
        struct as as;
        struct hat *hatp;
        struct hat hat;
        htable_t *ht;
        htable_t htable;
        uintptr_t base;
        int h;
        int level;
        int found = 0;
        x86pte_t pte;
        physaddr_t paddr;

        if (asp != NULL) {
                if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
                        mdb_warn("Couldn't read struct as\n");
                        return (DCMD_ERR);
                }
                hatp = as.a_hat;
        } else {
                hatp = khat;
        }

        /*
         * read the hat and its hash table
         */
        if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
                mdb_warn("Couldn't read struct hat\n");
                return (DCMD_ERR);
        }

        /*
         * read the htable hashtable
         */
        for (level = 0; level <= mmu.max_level; ++level) {
                if (level == TOP_LEVEL(&hat))
                        base = 0;
                else
                        base = addr & mmu.level_mask[level + 1];

                for (h = 0; h < hat.hat_num_hash; ++h) {
                        if (mdb_vread(&ht, sizeof (htable_t *),
                            (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
                                mdb_warn("Couldn't read htable\n");
                                return (DCMD_ERR);
                        }
                        for (; ht != NULL; ht = htable.ht_next) {
                                if (mdb_vread(&htable, sizeof (htable_t),
                                    (uintptr_t)ht) == -1) {
                                        mdb_warn("Couldn't read htable\n");
                                        return (DCMD_ERR);
                                }

                                if (htable.ht_vaddr != base ||
                                    htable.ht_level != level)
                                        continue;

                                pte = get_pte(&hat, &htable, addr);

                                if (print_level) {
                                        mdb_printf("\tlevel=%d htable=0x%p "
                                            "pte=0x%llr\n", level, ht, pte);
                                }

                                if (!PTE_ISVALID(pte)) {
                                        mdb_printf("Address %p is unmapped.\n",
                                            addr);
                                        return (DCMD_ERR);
                                }

                                if (found)
                                        continue;

                                if (PTE_IS_LGPG(pte, level))
                                        paddr = mdb_ma_to_pa(pte &
                                            PT_PADDR_LGPG);
                                else
                                        paddr = mdb_ma_to_pa(pte & PT_PADDR);
                                paddr += addr & mmu.level_offset[level];
                                if (pap != NULL)
                                        *pap = paddr;
                                if (mfnp != NULL)
                                        *mfnp = pte2mfn(pte, level);
                                found = 1;
                        }
                }
        }

done:
        if (!found)
                return (DCMD_ERR);
        return (DCMD_OK);
}

int
va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        uintptr_t addrspace;
        char *addrspace_str = NULL;
        int piped = flags & DCMD_PIPE_OUT;
        pfn_t pfn;
        pfn_t mfn;
        int rc;

        init_mmu();

        if (mmu.num_level == 0)
                return (DCMD_ERR);

        if (mdb_getopts(argc, argv,
            'a', MDB_OPT_STR, &addrspace_str, NULL) != argc)
                return (DCMD_USAGE);

        if ((flags & DCMD_ADDRSPEC) == 0)
                return (DCMD_USAGE);

        /*
         * parse the address space
         */
        if (addrspace_str != NULL)
                addrspace = mdb_strtoull(addrspace_str);
        else
                addrspace = 0;

        rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);

        if (rc != DCMD_OK)
                return (rc);

        if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
                mdb_warn("Invalid mfn %lr\n", mfn);
                return (DCMD_ERR);
        }

        if (piped) {
                mdb_printf("0x%lr\n", pfn);
                return (DCMD_OK);
        }

        mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr, pfn);

        if (is_xpv)
                mdb_printf(" (mfn 0x%lr)", mfn);

        mdb_printf("\n");

        return (DCMD_OK);
}
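
/*
 * Example (illustrative; the dcmd is typically registered as ::vatopfn):
 *
 *	> fffffe0012345000::vatopfn
 *	> 12345000::vatopfn -a <struct as address>
 *
 * Without -a the kernel's address space (kas) is used; -a names the
 * struct as to use when translating a user virtual address.
 */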

/*
 * Report all hat's that either use PFN as a page table or that map the page.
 */
static int
do_report_maps(pfn_t pfn)
{
        struct hat *hatp;
        struct hat hat;
        htable_t *ht;
        htable_t htable;
        uintptr_t base;
        int h;
        int level;
        int entry;
        x86pte_t pte;
        physaddr_t paddr;
        size_t len;

        /*
         * The hats are kept in a list with khat at the head.
         */
        for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
                /*
                 * read the hat and its hash table
                 */
                if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
                        mdb_warn("Couldn't read struct hat\n");
                        return (DCMD_ERR);
                }

                /*
                 * read the htable hashtable
                 */
                paddr = 0;
                for (h = 0; h < hat.hat_num_hash; ++h) {
                        if (mdb_vread(&ht, sizeof (htable_t *),
                            (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
                                mdb_warn("Couldn't read htable\n");
                                return (DCMD_ERR);
                        }
                        for (; ht != NULL; ht = htable.ht_next) {
                                if (mdb_vread(&htable, sizeof (htable_t),
                                    (uintptr_t)ht) == -1) {
                                        mdb_warn("Couldn't read htable\n");
                                        return (DCMD_ERR);
                                }

                                /*
                                 * only report kernel addresses once
                                 */
                                if (hatp != khat &&
                                    htable.ht_vaddr >= kernelbase)
                                        continue;

                                /*
                                 * Is the PFN a pagetable itself?
                                 */
                                if (htable.ht_pfn == pfn) {
                                        mdb_printf("Pagetable for "
                                            "hat=%p htable=%p\n", hatp, ht);
                                        continue;
                                }

                                /*
                                 * otherwise, examine page mappings
                                 */
                                level = htable.ht_level;
                                if (level > mmu.max_page_level)
                                        continue;
                                paddr = mmu_ptob((physaddr_t)htable.ht_pfn);
                                for (entry = 0;
                                    entry < HTABLE_NUM_PTES(&htable);
                                    ++entry) {

                                        base = htable.ht_vaddr + entry *
                                            mmu.level_size[level];

                                        /*
                                         * only report kernel addresses once
                                         */
                                        if (hatp != khat &&
                                            base >= kernelbase)
                                                continue;

                                        len = mdb_pread(&pte, mmu.pte_size,
                                            paddr + entry * mmu.pte_size);
                                        if (len != mmu.pte_size)
                                                return (DCMD_ERR);

                                        if ((pte & PT_VALID) == 0)
                                                continue;
                                        if (level == 0 || !(pte & PT_PAGESIZE))
                                                pte &= PT_PADDR;
                                        else
                                                pte &= PT_PADDR_LGPG;
                                        if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
                                                continue;
                                        mdb_printf("hat=%p maps addr=%p\n",
                                            hatp, (caddr_t)base);
                                }
                        }
                }
        }

done:
        return (DCMD_OK);
}

/*
 * given a PFN as its address argument, prints out the uses of it
 */
/*ARGSUSED*/
int
report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        pfn_t pfn;
        uint_t mflag = 0;

        init_mmu();

        if (mmu.num_level == 0)
                return (DCMD_ERR);

        if ((flags & DCMD_ADDRSPEC) == 0)
                return (DCMD_USAGE);

        if (mdb_getopts(argc, argv,
            'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
                return (DCMD_USAGE);

        pfn = (pfn_t)addr;
        if (mflag)
                pfn = mdb_mfn_to_pfn(pfn);

        return (do_report_maps(pfn));
}
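
/*
 * Example (illustrative, assuming the dcmd is registered as ::report_maps):
 *
 *	> <pfn>::report_maps
 *	> <mfn>::report_maps -m
 *
 * With -m the address is treated as an MFN and converted to a PFN first.
 */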

static int
do_ptable_dcmd(pfn_t pfn, uint64_t level)
{
        struct hat *hatp;
        struct hat hat;
        htable_t *ht;
        htable_t htable;
        uintptr_t base;
        int h;
        int entry;
        uintptr_t pagesize;
        x86pte_t pte;
        physaddr_t paddr;
        size_t len;

        /*
         * The hats are kept in a list with khat at the head.
         */
        for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
                /*
                 * read the hat and its hash table
                 */
                if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
                        mdb_warn("Couldn't read struct hat\n");
                        return (DCMD_ERR);
                }

                /*
                 * read the htable hashtable
                 */
                paddr = 0;
                for (h = 0; h < hat.hat_num_hash; ++h) {
                        if (mdb_vread(&ht, sizeof (htable_t *),
                            (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
                                mdb_warn("Couldn't read htable\n");
                                return (DCMD_ERR);
                        }
                        for (; ht != NULL; ht = htable.ht_next) {
                                if (mdb_vread(&htable, sizeof (htable_t),
                                    (uintptr_t)ht) == -1) {
                                        mdb_warn("Couldn't read htable\n");
                                        return (DCMD_ERR);
                                }

                                /*
                                 * Is this the PFN for this htable
                                 */
                                if (htable.ht_pfn == pfn)
                                        goto found_it;
                        }
                }
        }

found_it:
        if (htable.ht_pfn == pfn) {
                mdb_printf("htable=%p\n", ht);
                if (level == (uint64_t)-1) {
                        level = htable.ht_level;
                } else if (htable.ht_level != level) {
                        mdb_warn("htable has level %d but forcing level %lu\n",
                            htable.ht_level, level);
                }
                base = htable.ht_vaddr;
                pagesize = mmu.level_size[level];
        } else {
                if (level == (uint64_t)-1)
                        level = 0;
                mdb_warn("couldn't find matching htable, using level=%lu, "
                    "base address=0x0\n", level);
                base = 0;
                pagesize = mmu.level_size[level];
        }

        paddr = mmu_ptob((physaddr_t)pfn);
        for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
                len = mdb_pread(&pte, mmu.pte_size,
                    paddr + entry * mmu.pte_size);
                if (len != mmu.pte_size)
                        return (DCMD_ERR);

                if (pte == 0)
                        continue;

                mdb_printf("[%3d] va=0x%p ", entry,
                    VA_SIGN_EXTEND(base + entry * pagesize));
                do_pte_dcmd(level, pte);
        }

done:
        return (DCMD_OK);
}

/*
 * Dump the page table at the given PFN
 */
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        pfn_t pfn;
        uint_t mflag = 0;
        uint64_t level = (uint64_t)-1;

        init_mmu();

        if (mmu.num_level == 0)
                return (DCMD_ERR);

        if ((flags & DCMD_ADDRSPEC) == 0)
                return (DCMD_USAGE);

        if (mdb_getopts(argc, argv,
            'm', MDB_OPT_SETBITS, TRUE, &mflag,
            'l', MDB_OPT_UINT64, &level, NULL) != argc)
                return (DCMD_USAGE);

        if (level != (uint64_t)-1 && level > mmu.max_level) {
                mdb_warn("invalid level %lu\n", level);
                return (DCMD_ERR);
        }

        pfn = (pfn_t)addr;
        if (mflag)
                pfn = mdb_mfn_to_pfn(pfn);

        return (do_ptable_dcmd(pfn, level));
}
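
/*
 * Example (illustrative, assuming the dcmd is registered as ::ptable):
 *
 *	> <pfn>::ptable
 *	> <pfn>::ptable -l 1
 *	> <mfn>::ptable -m
 *
 * Each non-zero entry in the page table held in that page frame is printed
 * via do_pte_dcmd(); -l forces the level used to decode the entries and -m
 * treats the address as an MFN.
 */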

static int
do_htables_dcmd(hat_t *hatp)
{
        struct hat hat;
        htable_t *ht;
        htable_t htable;
        int h;

        /*
         * read the hat and its hash table
         */
        if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
                mdb_warn("Couldn't read struct hat\n");
                return (DCMD_ERR);
        }

        /*
         * read the htable hashtable
         */
        for (h = 0; h < hat.hat_num_hash; ++h) {
                if (mdb_vread(&ht, sizeof (htable_t *),
                    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
                        mdb_warn("Couldn't read htable ptr\n");
                        return (DCMD_ERR);
                }
                for (; ht != NULL; ht = htable.ht_next) {
                        mdb_printf("%p\n", ht);
                        if (mdb_vread(&htable, sizeof (htable_t),
                            (uintptr_t)ht) == -1) {
                                mdb_warn("Couldn't read htable\n");
                                return (DCMD_ERR);
                        }
                }
        }
        return (DCMD_OK);
}

/*
 * Dump the htables for the given hat
 */
/*ARGSUSED*/
int
htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        hat_t *hat;

        init_mmu();

        if (mmu.num_level == 0)
                return (DCMD_ERR);

        if ((flags & DCMD_ADDRSPEC) == 0)
                return (DCMD_USAGE);

        hat = (hat_t *)addr;

        return (do_htables_dcmd(hat));
}
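
/*
 * Example (illustrative, assuming the dcmd is registered as ::htables):
 *
 *	> <hat address>::htables
 *
 * This prints the address of every htable hanging off the hat's hash
 * table; each address can then be examined with ::print htable_t, or its
 * ht_pfn fed to ::ptable.
 */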

static uintptr_t
entry2va(size_t *entries)
{
        uintptr_t va = 0;

        for (level_t l = mmu.max_level; l >= 0; l--)
                va += entries[l] << mmu.level_shift[l];

        return (VA_SIGN_EXTEND(va));
}
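
/*
 * For example, with 4-level paging (level_shift[] == {12, 21, 30, 39}),
 * entries[] == {3, 0, 0, 1} reconstructs (1 << 39) + (3 << 12) ==
 * 0x8000003000, which VA_SIGN_EXTEND() then canonicalizes.
 */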

static void
ptmap_report(size_t *entries, uintptr_t start,
    boolean_t user, boolean_t writable, boolean_t wflag)
{
        uint64_t curva = entry2va(entries);

        mdb_printf("mapped %s,%s range of %lu bytes: %a-%a\n",
            user ? "user" : "kernel", writable ? "writable" : "read-only",
            curva - start, start, curva - 1);
        if (wflag && start >= kernelbase)
                (void) mdb_call_dcmd("whatis", start, DCMD_ADDRSPEC, 0, NULL);
}

int
ptmap_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        physaddr_t paddrs[MAX_NUM_LEVEL] = { 0, };
        size_t entry[MAX_NUM_LEVEL] = { 0, };
        uintptr_t start = (uintptr_t)-1;
        boolean_t writable = B_FALSE;
        boolean_t user = B_FALSE;
        boolean_t wflag = B_FALSE;
        level_t curlevel;

        if ((flags & DCMD_ADDRSPEC) == 0)
                return (DCMD_USAGE);

        if (mdb_getopts(argc, argv,
            'w', MDB_OPT_SETBITS, TRUE, &wflag, NULL) != argc)
                return (DCMD_USAGE);

        init_mmu();

        if (mmu.num_level == 0)
                return (DCMD_ERR);

        curlevel = mmu.max_level;

        paddrs[curlevel] = addr & MMU_PAGEMASK;

        for (;;) {
                physaddr_t pte_addr;
                x86pte_t pte;

                pte_addr = paddrs[curlevel] +
                    (entry[curlevel] << mmu.pte_size_shift);

                if (mdb_pread(&pte, sizeof (pte), pte_addr) != sizeof (pte)) {
                        mdb_warn("couldn't read pte at %p", pte_addr);
                        return (DCMD_ERR);
                }

                if (PTE_GET(pte, PT_VALID) == 0) {
                        if (start != (uintptr_t)-1) {
                                ptmap_report(entry, start,
                                    user, writable, wflag);
                                start = (uintptr_t)-1;
                        }
                } else if (curlevel == 0 || PTE_GET(pte, PT_PAGESIZE)) {
                        if (start == (uintptr_t)-1) {
                                start = entry2va(entry);
                                user = PTE_GET(pte, PT_USER);
                                writable = PTE_GET(pte, PT_WRITABLE);
                        } else if (user != PTE_GET(pte, PT_USER) ||
                            writable != PTE_GET(pte, PT_WRITABLE)) {
                                ptmap_report(entry, start,
                                    user, writable, wflag);
                                start = entry2va(entry);
                                user = PTE_GET(pte, PT_USER);
                                writable = PTE_GET(pte, PT_WRITABLE);
                        }
                } else {
                        /* Descend a level. */
                        physaddr_t pa = mmu_ptob(pte2mfn(pte, curlevel));
                        paddrs[--curlevel] = pa;
                        entry[curlevel] = 0;
                        continue;
                }

                while (++entry[curlevel] == mmu.ptes_per_table) {
                        /* Ascend back up. */
                        entry[curlevel] = 0;
                        if (curlevel == mmu.max_level) {
                                if (start != (uintptr_t)-1) {
                                        ptmap_report(entry, start,
                                            user, writable, wflag);
                                }
                                goto out;
                        }

                        curlevel++;
                }
        }

out:
        return (DCMD_OK);
}
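
/*
 * Example (illustrative, assuming the dcmd is registered as ::ptmap):
 *
 *	> <physical address of top-level page table>::ptmap
 *	> <physical address of top-level page table>::ptmap -w
 *
 * The walk starts from the given top-level page table (e.g. the value in
 * %cr3, masked to a page boundary) and coalesces adjacent valid mappings
 * with identical user/writable attributes; -w additionally runs ::whatis
 * on the start of each kernel range.
 */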