4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
25 * Copyright 2018 Joyent, Inc.
29 * This part of the file contains the mdb support for dcmds:
32 * memseg - a memseg list walker for ::memseg_list
36 #include <sys/types.h>
37 #include <sys/machparam.h>
38 #include <sys/controlregs.h>
39 #include <sys/mach_mmu.h>
41 #include <sys/hypervisor.h>
45 #include <mdb/mdb_modapi.h>
46 #include <mdb/mdb_target.h>
49 #include <vm/hat_i86.h>
/*
 * x86-64 canonical-address helpers: bit 47 is the virtual-address sign
 * bit; VA_SIGN_EXTEND() propagates it into the upper bits using the
 * classic (x ^ s) - s sign-extension identity.
 */
51 #define VA_SIGN_BIT (1UL << 47)
52 #define VA_SIGN_EXTEND(va) (((va) ^ VA_SIGN_BIT) - VA_SIGN_BIT)
/*
 * Forward declarations: do_va2pa() is the core VA->PA translator shared
 * by several dcmds below; init_mmu() lazily reads MMU parameters from
 * the target kernel.
 */
59 static int do_va2pa(uintptr_t, struct as
*, int, physaddr_t
*, pfn_t
*);
60 static void init_mmu(void);
/*
 * platform_vtop(): translate virtual address 'addr' in address space
 * 'asp' to a physical address stored through 'pap', by delegating to
 * do_va2pa() with printing disabled (print_level == 0) and no PFN
 * output requested (NULL).
 * NOTE(review): original line numbers jump (63 -> 70 -> 73) — the
 * function prologue and the body of the mmu.num_level guard are
 * missing from this extraction; confirm against upstream.
 */
63 platform_vtop(uintptr_t addr
, struct as
*asp
, physaddr_t
*pap
)
/* HAT information is unusable until init_mmu() has populated 'mmu'. */
70 if (mmu
.num_level
== 0)
73 return (do_va2pa(addr
, asp
, 0, pap
, NULL
));
/*
 * ::memseg_list dcmd: print one struct memseg (or, with no address,
 * walk the global "memseg" list applying itself to each element).
 * NOTE(review): several original lines are missing from this extraction
 * (return statements, closing braces); code below is fragmentary.
 */
77 * ::memseg_list dcmd and walker to implement it.
81 memseg_list(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
/* No explicit address: walk all memsegs, re-invoking this dcmd. */
85 if (!(flags
& DCMD_ADDRSPEC
)) {
86 if (mdb_pwalk_dcmd("memseg", "memseg_list",
88 mdb_warn("can't walk memseg");
/* Print the column header once at the top of the output. */
94 if (DCMD_HDRSPEC(flags
))
95 mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
96 "PAGES", "EPAGES", "BASE", "END");
/* Copy the memseg out of the target before formatting its fields. */
98 if (mdb_vread(&ms
, sizeof (struct memseg
), addr
) == -1) {
99 mdb_warn("can't read memseg at %#lx", addr
);
103 mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr
,
104 ms
.pages
, ms
.epages
, ms
.pages_base
, ms
.pages_end
);
/*
 * Walker for the kernel's singly-linked memseg list.
 * NOTE(review): the walk_init/walk_step return statements and braces are
 * missing from this extraction (original line numbers jump).
 */
110 * walk the memseg structures
/*
 * memseg_walk_init(): global walk only — start from the kernel's
 * 'memsegs' symbol and allocate a local struct memseg buffer.
 */
113 memseg_walk_init(mdb_walk_state_t
*wsp
)
115 if (wsp
->walk_addr
!= NULL
) {
116 mdb_warn("memseg only supports global walks\n");
120 if (mdb_readvar(&wsp
->walk_addr
, "memsegs") == -1) {
121 mdb_warn("symbol 'memsegs' not found");
125 wsp
->walk_data
= mdb_alloc(sizeof (struct memseg
), UM_SLEEP
);
/*
 * memseg_walk_step(): read the current memseg, invoke the callback,
 * then advance walk_addr to the 'next' pointer from the copy just read.
 */
131 memseg_walk_step(mdb_walk_state_t
*wsp
)
/* NULL next pointer terminates the walk. */
135 if (wsp
->walk_addr
== 0) {
139 if (mdb_vread(wsp
->walk_data
, sizeof (struct memseg
),
140 wsp
->walk_addr
) == -1) {
141 mdb_warn("failed to read struct memseg at %p", wsp
->walk_addr
);
145 status
= wsp
->walk_callback(wsp
->walk_addr
, wsp
->walk_data
,
148 wsp
->walk_addr
= (uintptr_t)(((struct memseg
*)wsp
->walk_data
)->next
);
/* memseg_walk_fini(): release the buffer allocated in walk_init. */
154 memseg_walk_fini(mdb_walk_state_t
*wsp
)
156 mdb_free(wsp
->walk_data
, sizeof (struct memseg
));
160 * Now HAT related dcmds.
/*
 * Cached target-kernel state, filled in by init_mmu():
 * khat is the kernel HAT, mmu the MMU geometry, kernelbase the
 * user/kernel address-space boundary.
 */
163 static struct hat
*khat
; /* value of kas.a_hat */
164 struct hat_mmu_info mmu
;
165 uintptr_t kernelbase
;
/*
 * Xen (i86xpv) paravirtualized-domain state: machine-to-physical
 * translation tables read out of the target image.
 */
168 * stuff for i86xpv images
171 static uintptr_t mfn_list_addr
; /* kernel MFN list address */
172 uintptr_t xen_virt_start
; /* address of mfn_to_pfn[] table */
173 ulong_t mfn_count
; /* number of pfn's in the MFN list */
174 pfn_t
*mfn_list
; /* local MFN list copy */
/*
 * init_mmu(): one-time lazy initialization — read the 'mmu', 'kas' and
 * 'kernelbase' symbols from the target, detect a paravirtualized (Xen)
 * image via its MFN-list symbols, and snapshot the PFN->MFN list locally.
 * NOTE(review): the function signature, several guards/returns, and what
 * appears to be a separate mfn_list teardown function (around original
 * lines 225-231) are missing from this extraction.
 */
177 * read mmu parameters from kernel
/* Already initialized — mmu.num_level != 0 means a prior call succeeded. */
184 if (mmu
.num_level
!= 0)
187 if (mdb_readsym(&mmu
, sizeof (mmu
), "mmu") == -1)
188 mdb_warn("Can't use HAT information before mmu_init()\n");
189 if (mdb_readsym(&kas
, sizeof (kas
), "kas") == -1)
190 mdb_warn("Couldn't find kas - kernel's struct as\n");
191 if (mdb_readsym(&kernelbase
, sizeof (kernelbase
), "kernelbase") == -1)
192 mdb_warn("Couldn't find kernelbase\n");
/*
 * Presence of the Xen MFN symbols marks a paravirtualized image;
 * failure to read any of them means bare metal (mfn_list_addr = NULL).
 */
196 * Is this a paravirtualized domain image?
198 if (mdb_readsym(&mfn_list_addr
, sizeof (mfn_list_addr
),
200 mdb_readsym(&xen_virt_start
, sizeof (xen_virt_start
),
201 "xen_virt_start") == -1 ||
202 mdb_readsym(&mfn_count
, sizeof (mfn_count
), "mfn_count") == -1) {
203 mfn_list_addr
= NULL
;
206 is_xpv
= mfn_list_addr
!= NULL
;
/* Snapshot the kernel's PFN->MFN table into a local copy. */
210 * recreate the local mfn_list
213 size_t sz
= mfn_count
* sizeof (pfn_t
);
214 mfn_list
= mdb_zalloc(sz
, UM_SLEEP
);
216 if (mdb_vread(mfn_list
, sz
, (uintptr_t)mfn_list_addr
) == -1) {
217 mdb_warn("Failed to read MFN list\n");
218 mdb_free(mfn_list
, sz
);
/*
 * NOTE(review): the fragment below (original lines 229-230) appears to
 * belong to a separate cleanup routine freeing the local list.
 */
229 if (mfn_list
!= NULL
)
230 mdb_free(mfn_list
, mfn_count
* sizeof (mfn_t
));
/*
 * MFN<->PFN conversion, in (presumably) three conditionally-compiled
 * variants: a kmdb version that reads the live machine-to-phys table, a
 * crash-dump version that searches the saved mfn_list, and passthrough
 * macros for non-xpv builds. NOTE(review): the #ifdef/#else directives
 * and several returns/braces are missing from this extraction.
 */
239 * Convert between MFNs and PFNs. Since we're in kmdb we can go directly
240 * through the machine to phys mapping and the MFN list.
/*
 * kmdb mdb_mfn_to_pfn(): look up pfn in the hypervisor's mfn_to_pfn
 * table at xen_virt_start, then cross-check it against the kernel's
 * PFN->MFN list before trusting it.
 */
244 mdb_mfn_to_pfn(mfn_t mfn
)
250 if (mfn_list_addr
== NULL
)
253 pfn_list
= (pfn_t
*)xen_virt_start
;
254 if (mdb_vread(&pfn
, sizeof (pfn
), (uintptr_t)(pfn_list
+ mfn
)) == -1)
257 if (mdb_vread(&tmp
, sizeof (tmp
),
258 (uintptr_t)(mfn_list_addr
+ (pfn
* sizeof (mfn_t
)))) == -1)
/* Reject inconsistent round-trips (stale or bogus table entries). */
261 if (pfn
>= mfn_count
|| tmp
!= mfn
)
/* kmdb mdb_pfn_to_mfn(): direct indexed read of the kernel MFN list. */
268 mdb_pfn_to_mfn(pfn_t pfn
)
274 if (mfn_list_addr
== NULL
|| pfn
>= mfn_count
)
277 if (mdb_vread(&mfn
, sizeof (mfn
),
278 (uintptr_t)(mfn_list_addr
+ (pfn
* sizeof (mfn_t
)))) == -1)
287 * Convert between MFNs and PFNs. Since a crash dump doesn't include the
288 * MFN->PFN translation table (it's part of the hypervisor, not our image)
289 * we do the MFN->PFN translation by searching the PFN->MFN (mfn_list)
290 * table, if it's there.
/* Crash-dump mdb_mfn_to_pfn(): linear search of the local mfn_list copy. */
294 mdb_mfn_to_pfn(mfn_t mfn
)
300 if (mfn_list
== NULL
)
303 for (pfn
= 0; pfn
< mfn_count
; ++pfn
) {
304 if (mfn_list
[pfn
] != mfn
)
/* Crash-dump mdb_pfn_to_mfn(): direct index into the local copy. */
313 mdb_pfn_to_mfn(pfn_t pfn
)
317 if (mfn_list
== NULL
|| pfn
>= mfn_count
)
320 return (mfn_list
[pfn
]);
/*
 * mdb_ma_to_pa(): machine address -> physical address; translate the
 * page frame and re-attach the intra-page offset. -1 signals failure.
 */
326 mdb_ma_to_pa(uint64_t ma
)
328 pfn_t pfn
= mdb_mfn_to_pfn(mmu_btop(ma
));
329 if (pfn
== -(pfn_t
)1)
330 return (-(paddr_t
)1);
332 return (mmu_ptob((paddr_t
)pfn
) | (ma
& (MMU_PAGESIZE
- 1)));
/* Non-xpv builds: machine == physical, so all conversions are identity. */
337 #define mdb_ma_to_pa(ma) (ma)
338 #define mdb_mfn_to_pfn(mfn) (mfn)
339 #define mdb_pfn_to_mfn(pfn) (pfn)
/*
 * ::mfntopfn — translate a hypervisor machine frame number (dot) to a
 * physical frame number. NOTE(review): returns/braces missing from this
 * extraction.
 */
344 * ::mfntopfn dcmd translates hypervisor machine page number
345 * to physical page number
349 mfntopfn_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
353 if ((flags
& DCMD_ADDRSPEC
) == 0) {
354 mdb_warn("MFN missing\n");
358 if ((pfn
= mdb_mfn_to_pfn((pfn_t
)addr
)) == -(pfn_t
)1) {
359 mdb_warn("Invalid mfn %lr\n", (pfn_t
)addr
);
363 mdb_printf("%lr\n", pfn
);
/*
 * ::pfntomfn — the inverse translation; in a ,-loop, advance dot so
 * repeated invocations step through consecutive PFNs.
 */
369 * ::pfntomfn dcmd translates physical page number to
370 * hypervisor machine page number
374 pfntomfn_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
378 if ((flags
& DCMD_ADDRSPEC
) == 0) {
379 mdb_warn("PFN missing\n");
383 if ((mfn
= mdb_pfn_to_mfn((pfn_t
)addr
)) == -(pfn_t
)1) {
384 mdb_warn("Invalid pfn %lr\n", (pfn_t
)addr
);
388 mdb_printf("%lr\n", mfn
);
390 if (flags
& DCMD_LOOP
)
391 mdb_set_dot(addr
+ 1);
/*
 * pte2mfn(): extract the frame number from a PTE, using the large-page
 * physical-address mask when the entry maps a large page (PT_PAGESIZE
 * is only meaningful above level 0).
 * NOTE(review): the return statement is missing from this extraction.
 */
396 pte2mfn(x86pte_t pte
, uint_t level
)
399 if (level
> 0 && (pte
& PT_PAGESIZE
))
400 mfn
= mmu_btop(pte
& PT_PADDR_LGPG
);
402 mfn
= mmu_btop(pte
& PT_PADDR
);
/*
 * do_pte_dcmd(): decode and print one page-table entry at the given
 * level — frame number, permission/status bits, and the PAT-derived
 * cacheability attribute.
 * NOTE(review): lines computing pat_index and several printf bodies are
 * missing from this extraction (original line numbers jump).
 */
407 do_pte_dcmd(int level
, uint64_t pte
)
/* PAT index -> human-readable memory type (8 combinations of PAT/PCD/PWT). */
409 static char *attr
[] = {
410 "wrback", "wrthru", "uncached", "uncached",
411 "wrback", "wrthru", "wrcombine", "uncached"};
415 mdb_printf("pte=0x%llr: ", pte
);
417 mfn
= pte2mfn(pte
, level
);
/* On Xen the PTE holds a machine frame, so label it "mfn" there. */
418 mdb_printf("%s=0x%lr ", is_xpv
? "mfn" : "pfn", mfn
);
420 if (PTE_GET(pte
, mmu
.pt_nx
))
421 mdb_printf("noexec ");
423 if (PTE_GET(pte
, PT_NOCONSIST
))
424 mdb_printf("noconsist ");
426 if (PTE_GET(pte
, PT_NOSYNC
))
427 mdb_printf("nosync ");
429 if (PTE_GET(pte
, mmu
.pt_global
))
430 mdb_printf("global ");
/* PT_PAGESIZE / PT_MOD / PT_REF only apply above level 0. */
432 if (level
> 0 && PTE_GET(pte
, PT_PAGESIZE
))
433 mdb_printf("largepage ");
435 if (level
> 0 && PTE_GET(pte
, PT_MOD
))
438 if (level
> 0 && PTE_GET(pte
, PT_REF
))
441 if (PTE_GET(pte
, PT_USER
))
444 if (PTE_GET(pte
, PT_WRITABLE
))
445 mdb_printf("write ");
/*
 * Build the PAT table index from the PAT/PCD/PWT bits; the PAT bit
 * lives in a different position for large pages (PT_PAT_LARGE).
 */
448 * Report non-standard cacheability
452 if (PTE_GET(pte
, PT_PAGESIZE
) && PTE_GET(pte
, PT_PAT_LARGE
))
455 if (PTE_GET(pte
, PT_PAT_4K
))
459 if (PTE_GET(pte
, PT_NOCACHE
))
462 if (PTE_GET(pte
, PT_WRITETHRU
))
466 mdb_printf("%s", attr
[pat_index
]);
468 if (PTE_GET(pte
, PT_VALID
) == 0)
469 mdb_printf(" !VALID ");
/*
 * ::pte dcmd — pretty-print the PTE given as the address argument;
 * -l selects the page-table level (default level 0).
 * NOTE(review): the returns for the guard conditions are missing from
 * this extraction.
 */
476 * Print a PTE in more human friendly way. The PTE is assumed to be in
477 * a level 0 page table, unless -l specifies another level.
481 pte_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
487 if (mmu
.num_level
== 0)
490 if ((flags
& DCMD_ADDRSPEC
) == 0)
493 if (mdb_getopts(argc
, argv
,
494 'l', MDB_OPT_UINT64
, &level
) != argc
)
497 if (level
> mmu
.max_level
) {
498 mdb_warn("invalid level %lu\n", level
);
505 return (do_pte_dcmd((int)level
, addr
));
/*
 * va2entry(): compute the index of 'addr' within the page table that
 * 'htable' describes — offset from the table's base VA, shifted down by
 * the per-level VA shift, masked to the table's entry count.
 */
509 va2entry(htable_t
*htable
, uintptr_t addr
)
511 size_t entry
= (addr
- htable
->ht_vaddr
);
513 entry
>>= mmu
.level_shift
[htable
->ht_level
];
514 return (entry
& HTABLE_NUM_PTES(htable
) - 1);
/*
 * get_pte(): fetch the PTE mapping 'addr' from 'htable'. For a
 * HTABLE_COPIED table the PTEs live in the hat's local hat_copied_ptes
 * array; otherwise read from the table's physical page via mdb_pread().
 * NOTE(review): the buffer declaration and the fallthrough/return paths
 * are missing from this extraction.
 */
518 get_pte(hat_t
*hat
, htable_t
*htable
, uintptr_t addr
)
522 if (htable
->ht_flags
& HTABLE_COPIED
) {
523 uintptr_t ptr
= (uintptr_t)hat
->hat_copied_ptes
;
524 ptr
+= va2entry(htable
, addr
) << mmu
.pte_size_shift
;
525 return (*(x86pte_t
*)ptr
);
/* Normal case: physical read at table frame + entry offset. */
528 paddr_t paddr
= mmu_ptob((paddr_t
)htable
->ht_pfn
);
529 paddr
+= va2entry(htable
, addr
) << mmu
.pte_size_shift
;
531 if ((mdb_pread(&buf
, mmu
.pte_size
, paddr
)) == mmu
.pte_size
)
/*
 * do_va2pa(): the core VA->PA translator. Walks the htable hash of the
 * HAT for 'asp' from the smallest to the largest level, looking for the
 * htable whose base VA and level cover 'addr', reads its PTE, and
 * derives the physical address (and optionally the MFN via *mfnp).
 * With print_level set, each matching level's htable/PTE is printed.
 * NOTE(review): this extraction is missing the local declarations,
 * hatp selection (kernel vs. user HAT), several returns/breaks and the
 * PT_PADDR_LGPG continuation of the large-page branch.
 */
538 do_va2pa(uintptr_t addr
, struct as
*asp
, int print_level
, physaddr_t
*pap
,
554 if (mdb_vread(&as
, sizeof (as
), (uintptr_t)asp
) == -1) {
555 mdb_warn("Couldn't read struct as\n");
564 * read the hat and its hash table
566 if (mdb_vread(&hat
, sizeof (hat
), (uintptr_t)hatp
) == -1) {
567 mdb_warn("Couldn't read struct hat\n");
572 * read the htable hashtable
/* Search bottom-up; skip the top level (handled separately upstream). */
574 for (level
= 0; level
<= mmu
.max_level
; ++level
) {
575 if (level
== TOP_LEVEL(&hat
))
/* Base VA a table at this level covering 'addr' must have. */
578 base
= addr
& mmu
.level_mask
[level
+ 1];
580 for (h
= 0; h
< hat
.hat_num_hash
; ++h
) {
581 if (mdb_vread(&ht
, sizeof (htable_t
*),
582 (uintptr_t)(hat
.hat_ht_hash
+ h
)) == -1) {
583 mdb_warn("Couldn't read htable\n");
/* Chase the hash-bucket chain of htables. */
586 for (; ht
!= NULL
; ht
= htable
.ht_next
) {
587 if (mdb_vread(&htable
, sizeof (htable_t
),
588 (uintptr_t)ht
) == -1) {
589 mdb_warn("Couldn't read htable\n");
593 if (htable
.ht_vaddr
!= base
||
594 htable
.ht_level
!= level
)
597 pte
= get_pte(&hat
, &htable
, addr
);
600 mdb_printf("\tlevel=%d htable=0x%p "
601 "pte=0x%llr\n", level
, ht
, pte
);
604 if (!PTE_ISVALID(pte
)) {
605 mdb_printf("Address %p is unmapped.\n",
/* Compose the PA: frame from the PTE plus in-page offset at this level. */
613 if (PTE_IS_LGPG(pte
, level
))
614 paddr
= mdb_ma_to_pa(pte
&
617 paddr
= mdb_ma_to_pa(pte
& PT_PADDR
);
618 paddr
+= addr
& mmu
.level_offset
[level
];
622 *mfnp
= pte2mfn(pte
, level
);
/*
 * ::vatopfn dcmd (va2pfn): translate the virtual address in dot to a
 * PFN, optionally in the address space given by -a; quiet single-value
 * output when piped. NOTE(review): guards' returns and the default
 * addrspace initialization are missing from this extraction.
 */
635 va2pfn_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
638 char *addrspace_str
= NULL
;
639 int piped
= flags
& DCMD_PIPE_OUT
;
646 if (mmu
.num_level
== 0)
649 if (mdb_getopts(argc
, argv
,
650 'a', MDB_OPT_STR
, &addrspace_str
) != argc
)
653 if ((flags
& DCMD_ADDRSPEC
) == 0)
657 * parse the address space
659 if (addrspace_str
!= NULL
)
660 addrspace
= mdb_strtoull(addrspace_str
);
/* Verbose printing only when not feeding a pipeline. */
664 rc
= do_va2pa(addr
, (struct as
*)addrspace
, !piped
, NULL
, &mfn
);
669 if ((pfn
= mdb_mfn_to_pfn(mfn
)) == -(pfn_t
)1) {
670 mdb_warn("Invalid mfn %lr\n", mfn
);
675 mdb_printf("0x%lr\n", pfn
);
679 mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr
, pfn
);
682 mdb_printf(" (mfn 0x%lr)", mfn
);
/*
 * do_report_maps(): scan every HAT (list headed by khat) and report
 * (a) any htable whose backing page IS the given pfn (i.e. the pfn is a
 * page table) and (b) any PTE that maps the pfn.
 * NOTE(review): local declarations, loop headers for the PTE scan, and
 * several continues/returns are missing from this extraction.
 */
690 * Report all hat's that either use PFN as a page table or that map the page.
693 do_report_maps(pfn_t pfn
)
708 * The hats are kept in a list with khat at the head.
710 for (hatp
= khat
; hatp
!= NULL
; hatp
= hat
.hat_next
) {
712 * read the hat and its hash table
714 if (mdb_vread(&hat
, sizeof (hat
), (uintptr_t)hatp
) == -1) {
715 mdb_warn("Couldn't read struct hat\n");
720 * read the htable hashtable
723 for (h
= 0; h
< hat
.hat_num_hash
; ++h
) {
724 if (mdb_vread(&ht
, sizeof (htable_t
*),
725 (uintptr_t)(hat
.hat_ht_hash
+ h
)) == -1) {
726 mdb_warn("Couldn't read htable\n");
729 for (; ht
!= NULL
; ht
= htable
.ht_next
) {
730 if (mdb_vread(&htable
, sizeof (htable_t
),
731 (uintptr_t)ht
) == -1) {
732 mdb_warn("Couldn't read htable\n");
/* Kernel mappings are shared by every HAT; report them only once. */
737 * only report kernel addresses once
740 htable
.ht_vaddr
>= kernelbase
)
744 * Is the PFN a pagetable itself?
746 if (htable
.ht_pfn
== pfn
) {
747 mdb_printf("Pagetable for "
748 "hat=%p htable=%p\n", hatp
, ht
);
/* Leaf scan: walk the table's PTEs looking for ones mapping pfn. */
753 * otherwise, examine page mappings
755 level
= htable
.ht_level
;
756 if (level
> mmu
.max_page_level
)
758 paddr
= mmu_ptob((physaddr_t
)htable
.ht_pfn
);
760 entry
< HTABLE_NUM_PTES(&htable
);
763 base
= htable
.ht_vaddr
+ entry
*
764 mmu
.level_size
[level
];
767 * only report kernel addresses once
773 len
= mdb_pread(&pte
, mmu
.pte_size
,
774 paddr
+ entry
* mmu
.pte_size
);
775 if (len
!= mmu
.pte_size
)
778 if ((pte
& PT_VALID
) == 0)
/* Only level-0 entries or large-page entries actually map a page. */
780 if (level
== 0 || !(pte
& PT_PAGESIZE
))
783 pte
&= PT_PADDR_LGPG
;
784 if (mmu_btop(mdb_ma_to_pa(pte
)) != pfn
)
786 mdb_printf("hat=%p maps addr=%p\n",
787 hatp
, (caddr_t
)base
);
/*
 * ::report_maps dcmd — with a PFN in dot, report all page tables and
 * mappings that reference it; -m treats dot as an MFN and converts it
 * first. NOTE(review): guards' returns and the pfn assignment from dot
 * are missing from this extraction.
 */
798 * given a PFN as its address argument, prints out the uses of it
802 report_maps_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
809 if (mmu
.num_level
== 0)
812 if ((flags
& DCMD_ADDRSPEC
) == 0)
815 if (mdb_getopts(argc
, argv
,
816 'm', MDB_OPT_SETBITS
, TRUE
, &mflag
, NULL
) != argc
)
821 pfn
= mdb_mfn_to_pfn(pfn
);
823 return (do_report_maps(pfn
));
/*
 * do_ptable_dcmd(): dump every PTE of the page table stored at 'pfn'.
 * First search all HATs for the htable owning that frame to learn its
 * level and base VA; 'level' may be forced by the caller ((uint64_t)-1
 * means auto-detect). Then read and decode each entry via do_pte_dcmd().
 * NOTE(review): local declarations, loop-exit gotos/breaks and a
 * level-defaulting branch are missing from this extraction.
 */
827 do_ptable_dcmd(pfn_t pfn
, uint64_t level
)
842 * The hats are kept in a list with khat at the head.
844 for (hatp
= khat
; hatp
!= NULL
; hatp
= hat
.hat_next
) {
846 * read the hat and its hash table
848 if (mdb_vread(&hat
, sizeof (hat
), (uintptr_t)hatp
) == -1) {
849 mdb_warn("Couldn't read struct hat\n");
854 * read the htable hashtable
857 for (h
= 0; h
< hat
.hat_num_hash
; ++h
) {
858 if (mdb_vread(&ht
, sizeof (htable_t
*),
859 (uintptr_t)(hat
.hat_ht_hash
+ h
)) == -1) {
860 mdb_warn("Couldn't read htable\n");
863 for (; ht
!= NULL
; ht
= htable
.ht_next
) {
864 if (mdb_vread(&htable
, sizeof (htable_t
),
865 (uintptr_t)ht
) == -1) {
866 mdb_warn("Couldn't read htable\n");
871 * Is this the PFN for this htable
873 if (htable
.ht_pfn
== pfn
)
/* Found the owning htable: take level/base from it (or honor -l). */
880 if (htable
.ht_pfn
== pfn
) {
881 mdb_printf("htable=%p\n", ht
);
882 if (level
== (uint64_t)-1) {
883 level
= htable
.ht_level
;
884 } else if (htable
.ht_level
!= level
) {
885 mdb_warn("htable has level %d but forcing level %lu\n",
886 htable
.ht_level
, level
);
888 base
= htable
.ht_vaddr
;
889 pagesize
= mmu
.level_size
[level
];
891 if (level
== (uint64_t)-1)
893 mdb_warn("couldn't find matching htable, using level=%lu, "
894 "base address=0x0\n", level
);
896 pagesize
= mmu
.level_size
[level
];
/* Dump every entry: VA is base + entry * pagesize, sign-extended. */
899 paddr
= mmu_ptob((physaddr_t
)pfn
);
900 for (entry
= 0; entry
< mmu
.ptes_per_table
; ++entry
) {
901 len
= mdb_pread(&pte
, mmu
.pte_size
,
902 paddr
+ entry
* mmu
.pte_size
);
903 if (len
!= mmu
.pte_size
)
909 mdb_printf("[%3d] va=0x%p ", entry
,
910 VA_SIGN_EXTEND(base
+ entry
* pagesize
));
911 do_pte_dcmd(level
, pte
);
/*
 * ::ptable dcmd — dump the page table whose frame number is in dot;
 * -m converts dot from an MFN first, -l forces the level used for
 * VA reconstruction. NOTE(review): guards' returns and the pfn
 * assignment from dot are missing from this extraction.
 */
919 * Dump the page table at the given PFN
923 ptable_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
/* (uint64_t)-1 == auto-detect the level from the owning htable. */
927 uint64_t level
= (uint64_t)-1;
931 if (mmu
.num_level
== 0)
934 if ((flags
& DCMD_ADDRSPEC
) == 0)
937 if (mdb_getopts(argc
, argv
,
938 'm', MDB_OPT_SETBITS
, TRUE
, &mflag
,
939 'l', MDB_OPT_UINT64
, &level
, NULL
) != argc
)
942 if (level
!= (uint64_t)-1 && level
> mmu
.max_level
) {
943 mdb_warn("invalid level %lu\n", level
);
949 pfn
= mdb_mfn_to_pfn(pfn
);
951 return (do_ptable_dcmd(pfn
, level
));
/*
 * do_htables_dcmd(): print the address of every htable hanging off the
 * given hat's hash table. NOTE(review): returns/braces are missing from
 * this extraction.
 */
955 do_htables_dcmd(hat_t
*hatp
)
963 * read the hat and its hash table
965 if (mdb_vread(&hat
, sizeof (hat
), (uintptr_t)hatp
) == -1) {
966 mdb_warn("Couldn't read struct hat\n");
971 * read the htable hashtable
973 for (h
= 0; h
< hat
.hat_num_hash
; ++h
) {
974 if (mdb_vread(&ht
, sizeof (htable_t
*),
975 (uintptr_t)(hat
.hat_ht_hash
+ h
)) == -1) {
/*
 * NOTE(review): "\\n" below emits a literal backslash-n, not a
 * newline — looks like a typo; confirm against upstream before fixing.
 */
976 mdb_warn("Couldn't read htable ptr\\n");
979 for (; ht
!= NULL
; ht
= htable
.ht_next
) {
980 mdb_printf("%p\n", ht
);
981 if (mdb_vread(&htable
, sizeof (htable_t
),
982 (uintptr_t)ht
) == -1) {
983 mdb_warn("Couldn't read htable\n");
/*
 * ::htables dcmd — dot is a hat_t pointer; list all of its htables.
 */
992 * Dump the htables for the given hat
996 htables_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
1002 if (mmu
.num_level
== 0)
1005 if ((flags
& DCMD_ADDRSPEC
) == 0)
1006 return (DCMD_USAGE
);
1008 hat
= (hat_t
*)addr
;
1010 return (do_htables_dcmd(hat
));
/*
 * entry2va(): reconstruct a virtual address from per-level page-table
 * entry indices — each index contributes its bits at that level's VA
 * shift; the result is made canonical with VA_SIGN_EXTEND().
 * NOTE(review): the accumulator declaration and function prologue are
 * missing from this extraction.
 */
1014 entry2va(size_t *entries
)
1018 for (level_t l
= mmu
.max_level
; l
>= 0; l
--)
1019 va
+= entries
[l
] << mmu
.level_shift
[l
];
1021 return (VA_SIGN_EXTEND(va
));
/*
 * ptmap_report(): emit one line describing a contiguous mapped range
 * [start, current VA); with -w (wflag) also run ::whatis on kernel
 * ranges to identify the backing object.
 */
1025 ptmap_report(size_t *entries
, uintptr_t start
,
1026 boolean_t user
, boolean_t writable
, boolean_t wflag
)
1028 uint64_t curva
= entry2va(entries
);
1030 mdb_printf("mapped %s,%s range of %lu bytes: %a-%a\n",
1031 user
? "user" : "kernel", writable
? "writable" : "read-only",
1032 curva
- start
, start
, curva
- 1);
1033 if (wflag
&& start
>= kernelbase
)
1034 (void) mdb_call_dcmd("whatis", start
, DCMD_ADDRSPEC
, 0, NULL
);
1038 ptmap_dcmd(uintptr_t addr
, uint_t flags
, int argc
, const mdb_arg_t
*argv
)
1040 physaddr_t paddrs
[MAX_NUM_LEVEL
] = { 0, };
1041 size_t entry
[MAX_NUM_LEVEL
] = { 0, };
1042 uintptr_t start
= (uintptr_t)-1;
1043 boolean_t writable
= B_FALSE
;
1044 boolean_t user
= B_FALSE
;
1045 boolean_t wflag
= B_FALSE
;
1048 if ((flags
& DCMD_ADDRSPEC
) == 0)
1049 return (DCMD_USAGE
);
1051 if (mdb_getopts(argc
, argv
,
1052 'w', MDB_OPT_SETBITS
, TRUE
, &wflag
, NULL
) != argc
)
1053 return (DCMD_USAGE
);
1057 if (mmu
.num_level
== 0)
1060 curlevel
= mmu
.max_level
;
1062 paddrs
[curlevel
] = addr
& MMU_PAGEMASK
;
1065 physaddr_t pte_addr
;
1068 pte_addr
= paddrs
[curlevel
] +
1069 (entry
[curlevel
] << mmu
.pte_size_shift
);
1071 if (mdb_pread(&pte
, sizeof (pte
), pte_addr
) != sizeof (pte
)) {
1072 mdb_warn("couldn't read pte at %p", pte_addr
);
1076 if (PTE_GET(pte
, PT_VALID
) == 0) {
1077 if (start
!= (uintptr_t)-1) {
1078 ptmap_report(entry
, start
,
1079 user
, writable
, wflag
);
1080 start
= (uintptr_t)-1;
1082 } else if (curlevel
== 0 || PTE_GET(pte
, PT_PAGESIZE
)) {
1083 if (start
== (uintptr_t)-1) {
1084 start
= entry2va(entry
);
1085 user
= PTE_GET(pte
, PT_USER
);
1086 writable
= PTE_GET(pte
, PT_WRITABLE
);
1087 } else if (user
!= PTE_GET(pte
, PT_USER
) ||
1088 writable
!= PTE_GET(pte
, PT_WRITABLE
)) {
1089 ptmap_report(entry
, start
,
1090 user
, writable
, wflag
);
1091 start
= entry2va(entry
);
1092 user
= PTE_GET(pte
, PT_USER
);
1093 writable
= PTE_GET(pte
, PT_WRITABLE
);
1096 /* Descend a level. */
1097 physaddr_t pa
= mmu_ptob(pte2mfn(pte
, curlevel
));
1098 paddrs
[--curlevel
] = pa
;
1099 entry
[curlevel
] = 0;
1103 while (++entry
[curlevel
] == mmu
.ptes_per_table
) {
1104 /* Ascend back up. */
1105 entry
[curlevel
] = 0;
1106 if (curlevel
== mmu
.max_level
) {
1107 if (start
!= (uintptr_t)-1) {
1108 ptmap_report(entry
, start
,
1109 user
, writable
, wflag
);