usr/src/cmd/mdb/i86pc/modules/unix/i86mmu.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */
/*
 * This part of the file contains the mdb support for dcmds:
 *	::memseg_list
 * and walkers for:
 *	memseg - a memseg list walker for ::memseg_list
 */
#include <sys/types.h>
#include <sys/machparam.h>
#include <sys/controlregs.h>
#include <sys/mach_mmu.h>
#include <vm/as.h>

#include <mdb/mdb_modapi.h>
#include <mdb/mdb_target.h>

#include <vm/page.h>
#include <vm/hat_i86.h>

#define	VA_SIGN_BIT (1UL << 47)
#define	VA_SIGN_EXTEND(va) (((va) ^ VA_SIGN_BIT) - VA_SIGN_BIT)
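/*
 * Illustrative note (not in the original source): x86-64 uses 48-bit
 * canonical virtual addresses, so bit 47 must be copied into bits 48-63.
 * The XOR/subtract trick above does exactly that, e.g. for a kernel-style
 * address with bit 47 set:
 *
 *	va                    = 0x0000fffffbc00000
 *	va ^ VA_SIGN_BIT      = 0x00007ffffbc00000
 *	(va ^ bit) - bit      = 0xfffffffffbc00000
 *
 * while a user address with bit 47 clear is returned unchanged.
 */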
struct pfn2pp {
	pfn_t pfn;
	page_t *pp;
};

static int do_va2pa(uintptr_t, struct as *, int, physaddr_t *, pfn_t *);
static void init_mmu(void);
int
platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
{
	if (asp == NULL)
		return (DCMD_ERR);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	return (do_va2pa(addr, asp, 0, pap, NULL));
}
/*
 * ::memseg_list dcmd and walker to implement it.
 */
/*ARGSUSED*/
int
memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memseg ms;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_pwalk_dcmd("memseg", "memseg_list",
		    0, NULL, 0) == -1) {
			mdb_warn("can't walk memseg");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
		    "PAGES", "EPAGES", "BASE", "END");

	if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
		mdb_warn("can't read memseg at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
	    ms.pages, ms.epages, ms.pages_base, ms.pages_end);

	return (DCMD_OK);
}
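/*
 * Illustrative usage (addresses are hypothetical):
 *
 *	> ::memseg_list
 *	ADDR             PAGES            EPAGES           BASE             END
 *	...one line per struct memseg on the memsegs list...
 *
 * A single memseg can also be printed explicitly with "addr::memseg_list".
 */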
/*
 * walk the memseg structures
 */
int
memseg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != (uintptr_t)NULL) {
		mdb_warn("memseg only supports global walks\n");
		return (WALK_ERR);
	}

	if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
		mdb_warn("symbol 'memsegs' not found");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
	return (WALK_NEXT);
}
int
memseg_walk_step(mdb_walk_state_t *wsp)
{
	int status;

	if (wsp->walk_addr == (uintptr_t)NULL) {
		return (WALK_DONE);
	}

	if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
	    wsp->walk_addr) == -1) {
		mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
		return (WALK_DONE);
	}

	status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
	    wsp->walk_cbdata);

	wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);

	return (status);
}

void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (struct memseg));
}
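/*
 * Illustrative usage: "::walk memseg" emits the address of each struct
 * memseg, so "::walk memseg | ::memseg_list" is equivalent to running
 * ::memseg_list with no address, which is what the dcmd above does
 * internally via mdb_pwalk_dcmd().
 */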
/*
 * Now HAT related dcmds.
 */

static struct hat *khat;	/* value of kas.a_hat */
struct hat_mmu_info mmu;
uintptr_t kernelbase;

/*
 * stuff for i86xpv images
 */
static int is_xpv;
static uintptr_t mfn_list_addr;	/* kernel MFN list address */
uintptr_t xen_virt_start;	/* address of mfn_to_pfn[] table */
ulong_t mfn_count;		/* number of pfn's in the MFN list */
pfn_t *mfn_list;		/* local MFN list copy */
/*
 * read mmu parameters from kernel
 */
static void
init_mmu(void)
{
	struct as kas;

	if (mmu.num_level != 0)
		return;

	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
		mdb_warn("Can't use HAT information before mmu_init()\n");
	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
		mdb_warn("Couldn't find kas - kernel's struct as\n");
	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
		mdb_warn("Couldn't find kernelbase\n");
	khat = kas.a_hat;

	/*
	 * Is this a paravirtualized domain image?
	 */
	if (mdb_readsym(&mfn_list_addr, sizeof (mfn_list_addr),
	    "mfn_list") == -1 ||
	    mdb_readsym(&xen_virt_start, sizeof (xen_virt_start),
	    "xen_virt_start") == -1 ||
	    mdb_readsym(&mfn_count, sizeof (mfn_count), "mfn_count") == -1) {
		mfn_list_addr = (uintptr_t)NULL;
	}

	is_xpv = mfn_list_addr != (uintptr_t)NULL;

#ifndef _KMDB
	/*
	 * recreate the local mfn_list
	 */
	if (is_xpv) {
		size_t sz = mfn_count * sizeof (pfn_t);
		mfn_list = mdb_zalloc(sz, UM_SLEEP);

		if (mdb_vread(mfn_list, sz, (uintptr_t)mfn_list_addr) == -1) {
			mdb_warn("Failed to read MFN list\n");
			mdb_free(mfn_list, sz);
			mfn_list = NULL;
		}
	}
#endif
}
void
free_mmu(void)
{
}
#define	mdb_ma_to_pa(ma) (ma)
#define	mdb_mfn_to_pfn(mfn) (mfn)
#define	mdb_pfn_to_mfn(pfn) (pfn)
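/*
 * Descriptive note (not in the original source): on bare metal a machine
 * address is the same as a physical address and an MFN is the same as a
 * PFN, so the three macros above are identity mappings. The
 * mfn_list/xen_virt_start data read by init_mmu() exists to support the
 * i86xpv (paravirtualized) image case, where the two numberings differ.
 */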
/*
 * ::mfntopfn dcmd translates hypervisor machine page number
 * to physical page number
 */
/*ARGSUSED*/
int
mfntopfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("MFN missing\n");
		return (DCMD_USAGE);
	}

	if ((pfn = mdb_mfn_to_pfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", (pfn_t)addr);
		return (DCMD_ERR);
	}

	mdb_printf("%lr\n", pfn);

	return (DCMD_OK);
}
/*
 * ::pfntomfn dcmd translates physical page number to
 * hypervisor machine page number
 */
/*ARGSUSED*/
int
pfntomfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t mfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("PFN missing\n");
		return (DCMD_USAGE);
	}

	if ((mfn = mdb_pfn_to_mfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid pfn %lr\n", (pfn_t)addr);
		return (DCMD_ABORT);
	}

	mdb_printf("%lr\n", mfn);

	if (flags & DCMD_LOOP)
		mdb_set_dot(addr + 1);
	return (DCMD_OK);
}
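/*
 * Illustrative usage (PFN value is hypothetical):
 *
 *	> 1000::pfntomfn
 *	1000
 *
 * On a bare-metal image both dcmds are identity translations. The
 * DCMD_LOOP handling above advances dot, so "pfn,4::pfntomfn" translates
 * four consecutive PFNs.
 */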
static pfn_t
pte2mfn(x86pte_t pte, uint_t level)
{
	pfn_t mfn;
	if (level > 0 && (pte & PT_PAGESIZE))
		mfn = mmu_btop(pte & PT_PADDR_LGPG);
	else
		mfn = mmu_btop(pte & PT_PADDR);
	return (mfn);
}
static int
do_pte_dcmd(int level, uint64_t pte)
{
	static char *attr[] = {
	    "wrback", "wrthru", "uncached", "uncached",
	    "wrback", "wrthru", "wrcombine", "uncached"};
	int pat_index = 0;
	pfn_t mfn;

	mdb_printf("pte=0x%llr: ", pte);

	mfn = pte2mfn(pte, level);
	mdb_printf("%s=0x%lr ", is_xpv ? "mfn" : "pfn", mfn);

	if (PTE_GET(pte, mmu.pt_nx))
		mdb_printf("noexec ");

	if (PTE_GET(pte, PT_NOCONSIST))
		mdb_printf("noconsist ");

	if (PTE_GET(pte, PT_NOSYNC))
		mdb_printf("nosync ");

	if (PTE_GET(pte, mmu.pt_global))
		mdb_printf("global ");

	if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
		mdb_printf("largepage ");

	if (level > 0 && PTE_GET(pte, PT_MOD))
		mdb_printf("mod ");

	if (level > 0 && PTE_GET(pte, PT_REF))
		mdb_printf("ref ");

	if (PTE_GET(pte, PT_USER))
		mdb_printf("user ");

	if (PTE_GET(pte, PT_WRITABLE))
		mdb_printf("write ");

	/*
	 * Report non-standard cacheability
	 */
	pat_index = 0;
	if (level > 0) {
		if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
			pat_index += 4;
	} else {
		if (PTE_GET(pte, PT_PAT_4K))
			pat_index += 4;
	}

	if (PTE_GET(pte, PT_NOCACHE))
		pat_index += 2;

	if (PTE_GET(pte, PT_WRITETHRU))
		pat_index += 1;

	if (pat_index != 0)
		mdb_printf("%s", attr[pat_index]);

	if (PTE_GET(pte, PT_VALID) == 0)
		mdb_printf(" !VALID ");

	mdb_printf("\n");
	return (DCMD_OK);
}
/*
 * Print a PTE in a more human-friendly way. The PTE is assumed to be in
 * a level 0 page table, unless -l specifies another level.
 */
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint64_t level = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'l', MDB_OPT_UINT64, &level, NULL) != argc)
		return (DCMD_USAGE);

	if (level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", level);
		return (DCMD_ERR);
	}

	if (addr == 0)
		return (DCMD_OK);

	return (do_pte_dcmd((int)level, addr));
}
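/*
 * Illustrative usage (the PTE value is hypothetical and the output format
 * approximate); this function is presumably registered as the ::pte dcmd:
 *
 *	> 12345067::pte
 *	pte=0x12345067: pfn=0x12345 user write
 *
 * Use -l to decode an entry taken from a higher-level table, e.g.
 * "value::pte -l 1".
 */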
static size_t
va2entry(htable_t *htable, uintptr_t addr)
{
	size_t entry = (addr - htable->ht_vaddr);

	entry >>= mmu.level_shift[htable->ht_level];
	return (entry & (HTABLE_NUM_PTES(htable) - 1));
}
static x86pte_t
get_pte(hat_t *hat, htable_t *htable, uintptr_t addr)
{
	x86pte_t buf;

	if (htable->ht_flags & HTABLE_COPIED) {
		uintptr_t ptr = (uintptr_t)hat->hat_copied_ptes;
		ptr += va2entry(htable, addr) << mmu.pte_size_shift;
		return (*(x86pte_t *)ptr);
	}

	paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
	paddr += va2entry(htable, addr) << mmu.pte_size_shift;

	if ((mdb_pread(&buf, mmu.pte_size, paddr)) == mmu.pte_size)
		return (buf);

	return (0);
}
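/*
 * Descriptive note (not in the original source): do_va2pa() below walks the
 * page tables through the hat's htable hash rather than by following CR3.
 * For each level, starting at level 0, it computes the virtual base an
 * htable covering "addr" at that level would have, scans the hash buckets
 * for an htable with that (vaddr, level) pair, and pulls the PTE out of it
 * with get_pte(). The first valid entry found (the level-0 or large-page
 * leaf) supplies the physical address; higher levels are still visited so
 * they can be printed when print_level is set.
 */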
static int
do_va2pa(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap,
    pfn_t *mfnp)
{
	struct as as;
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int found = 0;
	x86pte_t pte;
	physaddr_t paddr;

	if (asp != NULL) {
		if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
			mdb_warn("Couldn't read struct as\n");
			return (DCMD_ERR);
		}
		hatp = as.a_hat;
	} else {
		hatp = khat;
	}

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (level = 0; level <= mmu.max_level; ++level) {
		if (level == TOP_LEVEL(&hat))
			base = 0;
		else
			base = addr & mmu.level_mask[level + 1];

		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				if (htable.ht_vaddr != base ||
				    htable.ht_level != level)
					continue;

				pte = get_pte(&hat, &htable, addr);

				if (print_level) {
					mdb_printf("\tlevel=%d htable=0x%p "
					    "pte=0x%llr\n", level, ht, pte);
				}

				if (!PTE_ISVALID(pte)) {
					mdb_printf("Address %p is unmapped.\n",
					    addr);
					return (DCMD_ERR);
				}

				if (found)
					continue;

				if (PTE_IS_LGPG(pte, level))
					paddr = mdb_ma_to_pa(pte &
					    PT_PADDR_LGPG);
				else
					paddr = mdb_ma_to_pa(pte & PT_PADDR);
				paddr += addr & mmu.level_offset[level];
				if (pap != NULL)
					*pap = paddr;
				if (mfnp != NULL)
					*mfnp = pte2mfn(pte, level);
				found = 1;
			}
		}
	}

done:
	if (!found)
		return (DCMD_ERR);
	return (DCMD_OK);
}
int
va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t addrspace;
	char *addrspace_str = NULL;
	int piped = flags & DCMD_PIPE_OUT;
	pfn_t pfn;
	pfn_t mfn;
	int rc;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if (mdb_getopts(argc, argv,
	    'a', MDB_OPT_STR, &addrspace_str, NULL) != argc)
		return (DCMD_USAGE);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	/*
	 * parse the address space
	 */
	if (addrspace_str != NULL)
		addrspace = mdb_strtoull(addrspace_str);
	else
		addrspace = 0;

	rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);

	if (rc != DCMD_OK)
		return (rc);

	if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", mfn);
		return (DCMD_ERR);
	}

	if (piped) {
		mdb_printf("0x%lr\n", pfn);
		return (DCMD_OK);
	}

	mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr, pfn);

	if (is_xpv)
		mdb_printf(" (mfn 0x%lr)", mfn);

	mdb_printf("\n");

	return (DCMD_OK);
}
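/*
 * Illustrative usage (the address and intermediate values are hypothetical);
 * this function is presumably registered as the ::vatopfn dcmd:
 *
 *	> fffffffffbc00000::vatopfn
 *		level=0 htable=0x... pte=0x...
 *	Virtual address 0xfffffffffbc00000 maps pfn 0x...
 *
 * Use -a to name a struct as other than the kernel's. When the output is
 * piped, only the raw PFN is printed.
 */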
/*
 * Report all hats that either use PFN as a page table or that map the page.
 */
static int
do_report_maps(pfn_t pfn)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int entry;
	x86pte_t pte;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * only report kernel addresses once
				 */
				if (hatp != khat &&
				    htable.ht_vaddr >= kernelbase)
					continue;

				/*
				 * Is the PFN a pagetable itself?
				 */
				if (htable.ht_pfn == pfn) {
					mdb_printf("Pagetable for "
					    "hat=%p htable=%p\n", hatp, ht);
					continue;
				}

				/*
				 * otherwise, examine page mappings
				 */
				level = htable.ht_level;
				if (level > mmu.max_page_level)
					continue;
				paddr = mmu_ptob((physaddr_t)htable.ht_pfn);
				for (entry = 0;
				    entry < HTABLE_NUM_PTES(&htable);
				    ++entry) {

					base = htable.ht_vaddr + entry *
					    mmu.level_size[level];

					/*
					 * only report kernel addresses once
					 */
					if (hatp != khat &&
					    base >= kernelbase)
						continue;

					len = mdb_pread(&pte, mmu.pte_size,
					    paddr + entry * mmu.pte_size);
					if (len != mmu.pte_size)
						return (DCMD_ERR);

					if ((pte & PT_VALID) == 0)
						continue;
					if (level == 0 || !(pte & PT_PAGESIZE))
						pte &= PT_PADDR;
					else
						pte &= PT_PADDR_LGPG;
					if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
						continue;
					mdb_printf("hat=%p maps addr=%p\n",
					    hatp, (caddr_t)base);
				}
			}
		}
	}

done:
	return (DCMD_OK);
}
/*
 * given a PFN as its address argument, prints out the uses of it
 */
/*ARGSUSED*/
int
report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
		return (DCMD_USAGE);

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_report_maps(pfn));
}
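/*
 * Illustrative usage (the PFN and output values are hypothetical); this
 * function is presumably registered as the ::report_maps dcmd:
 *
 *	> 1d5b2::report_maps
 *	Pagetable for hat=... htable=...
 *	hat=... maps addr=...
 *
 * With -m the address argument is treated as an MFN and translated to a
 * PFN first, which only matters for i86xpv images.
 */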
static int
do_ptable_dcmd(pfn_t pfn, uint64_t level)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int entry;
	uintptr_t pagesize;
	x86pte_t pte;
	x86pte_t buf;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * Is this the PFN for this htable
				 */
				if (htable.ht_pfn == pfn)
					goto found_it;
			}
		}
	}

found_it:
	if (htable.ht_pfn == pfn) {
		mdb_printf("htable=%p\n", ht);
		if (level == (uint64_t)-1) {
			level = htable.ht_level;
		} else if (htable.ht_level != level) {
			mdb_warn("htable has level %d but forcing level %lu\n",
			    htable.ht_level, level);
		}
		base = htable.ht_vaddr;
		pagesize = mmu.level_size[level];
	} else {
		if (level == (uint64_t)-1)
			level = 0;
		mdb_warn("couldn't find matching htable, using level=%lu, "
		    "base address=0x0\n", level);
		base = 0;
		pagesize = mmu.level_size[level];
	}

	paddr = mmu_ptob((physaddr_t)pfn);
	for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
		len = mdb_pread(&buf, mmu.pte_size,
		    paddr + entry * mmu.pte_size);
		if (len != mmu.pte_size)
			return (DCMD_ERR);
		pte = buf;

		if (pte == 0)
			continue;

		mdb_printf("[%3d] va=0x%p ", entry,
		    VA_SIGN_EXTEND(base + entry * pagesize));
		do_pte_dcmd(level, pte);
	}

done:
	return (DCMD_OK);
}
/*
 * Dump the page table at the given PFN
 */
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;
	uint64_t level = (uint64_t)-1;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag,
	    'l', MDB_OPT_UINT64, &level, NULL) != argc)
		return (DCMD_USAGE);

	if (level != (uint64_t)-1 && level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", level);
		return (DCMD_ERR);
	}

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_ptable_dcmd(pfn, level));
}
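/*
 * Illustrative usage (the PFN and output values are hypothetical); this
 * function is presumably registered as the ::ptable dcmd:
 *
 *	> 11c38::ptable
 *	htable=...
 *	[  0] va=0x... pte=0x...: pfn=0x... ...
 *	[  1] va=0x... pte=0x...: pfn=0x... ...
 *
 * -l forces the level used to compute each entry's virtual address, and -m
 * treats the argument as an MFN rather than a PFN.
 */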
static int
do_htables_dcmd(hat_t *hatp)
{
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	int h;

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (h = 0; h < hat.hat_num_hash; ++h) {
		if (mdb_vread(&ht, sizeof (htable_t *),
		    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
			mdb_warn("Couldn't read htable ptr\n");
			return (DCMD_ERR);
		}
		for (; ht != NULL; ht = htable.ht_next) {
			mdb_printf("%p\n", ht);
			if (mdb_vread(&htable, sizeof (htable_t),
			    (uintptr_t)ht) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
		}
	}
	return (DCMD_OK);
}
/*
 * Dump the htables for the given hat
 */
/*ARGSUSED*/
int
htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	hat_t *hat;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	hat = (hat_t *)addr;

	return (do_htables_dcmd(hat));
}
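/*
 * Illustrative usage (the hat address and output are hypothetical); this
 * function is presumably registered as the ::htables dcmd. The address
 * argument is a hat_t pointer, e.g. a process's p_as->a_hat or the
 * kernel's kas.a_hat:
 *
 *	> fffffeb1d5c3a000::htables
 *	fffffeb1d2f61d58
 *	fffffeb1d2f61c80
 *	...
 */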
static uintptr_t
entry2va(size_t *entries)
{
	uintptr_t va = 0;

	for (level_t l = mmu.max_level; l >= 0; l--)
		va += entries[l] << mmu.level_shift[l];

	return (VA_SIGN_EXTEND(va));
}
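/*
 * Illustrative note (not in the original source): with standard 4-level
 * x86-64 paging the level shifts are 12, 21, 30 and 39, so an entries[]
 * array of { 5, 3, 2, 1 } for levels 0..3 reconstructs
 *
 *	va = (1 << 39) | (2 << 30) | (3 << 21) | (5 << 12) = 0x8080605000
 *
 * before sign extension.
 */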
static void
ptmap_report(size_t *entries, uintptr_t start,
    boolean_t user, boolean_t writable, boolean_t wflag)
{
	uint64_t curva = entry2va(entries);

	mdb_printf("mapped %s,%s range of %lu bytes: %a-%a\n",
	    user ? "user" : "kernel", writable ? "writable" : "read-only",
	    curva - start, start, curva - 1);
	if (wflag && start >= kernelbase)
		(void) mdb_call_dcmd("whatis", start, DCMD_ADDRSPEC, 0, NULL);
}
int
ptmap_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	physaddr_t paddrs[MAX_NUM_LEVEL] = { 0, };
	size_t entry[MAX_NUM_LEVEL] = { 0, };
	uintptr_t start = (uintptr_t)-1;
	boolean_t writable = B_FALSE;
	boolean_t user = B_FALSE;
	boolean_t wflag = B_FALSE;
	level_t curlevel;

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'w', MDB_OPT_SETBITS, TRUE, &wflag, NULL) != argc)
		return (DCMD_USAGE);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	curlevel = mmu.max_level;

	paddrs[curlevel] = addr & MMU_PAGEMASK;

	for (;;) {
		physaddr_t pte_addr;
		x86pte_t pte;

		pte_addr = paddrs[curlevel] +
		    (entry[curlevel] << mmu.pte_size_shift);

		if (mdb_pread(&pte, sizeof (pte), pte_addr) != sizeof (pte)) {
			mdb_warn("couldn't read pte at %p", pte_addr);
			return (DCMD_ERR);
		}

		if (PTE_GET(pte, PT_VALID) == 0) {
			if (start != (uintptr_t)-1) {
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = (uintptr_t)-1;
			}
		} else if (curlevel == 0 || PTE_GET(pte, PT_PAGESIZE)) {
			if (start == (uintptr_t)-1) {
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			} else if (user != PTE_GET(pte, PT_USER) ||
			    writable != PTE_GET(pte, PT_WRITABLE)) {
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			}
		} else {
			/* Descend a level. */
			physaddr_t pa = mmu_ptob(pte2mfn(pte, curlevel));
			paddrs[--curlevel] = pa;
			entry[curlevel] = 0;
			continue;
		}

		while (++entry[curlevel] == mmu.ptes_per_table) {
			/* Ascend back up. */
			entry[curlevel] = 0;
			if (curlevel == mmu.max_level) {
				if (start != (uintptr_t)-1) {
					ptmap_report(entry, start,
					    user, writable, wflag);
				}
				goto out;
			}
			curlevel++;
		}
	}

out:
	return (DCMD_OK);
}
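/*
 * Illustrative usage (the top-level pagetable address and output are
 * hypothetical); this function is presumably registered as the ::ptmap
 * dcmd. The address argument is the physical address of the top-level page
 * table (e.g. the value loaded into CR3):
 *
 *	> 1d5b2000::ptmap
 *	mapped kernel,writable range of 1048576 bytes: 0xfffffe0000000000-...
 *	mapped user,read-only range of ...
 *
 * With -w, ::whatis is invoked on the start of each kernel-address range.
 */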