4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
30 * HAT interfaces used by the kernel debugger to interact with the VM system.
31 * These interfaces are invoked when the world is stopped. As such, no blocking
32 * operations may be performed.
35 #include <sys/cpuvar.h>
36 #include <sys/kdi_impl.h>
37 #include <sys/errno.h>
38 #include <sys/systm.h>
39 #include <sys/sysmacros.h>
41 #include <sys/bootconf.h>
42 #include <sys/cmn_err.h>
43 #include <vm/seg_kmem.h>
44 #include <vm/hat_i86.h>
45 #include <sys/bootinfo.h>
46 #include <vm/kboot_mmu.h>
47 #include <sys/machsystm.h>
50 * The debugger needs direct access to the PTE of one page table entry
51 * in order to implement vtop and physical read/writes
53 static uintptr_t hat_kdi_page
= 0; /* vaddr for phsical page accesses */
54 static uint_t use_kbm
= 1;
55 uint_t hat_kdi_use_pae
; /* if 0, use x86pte32_t for pte type */
57 static x86pte_t
*hat_kdi_pte
= NULL
; /* vaddr of pte for hat_kdi_page */
60 * Get the address for remapping physical pages during boot
63 hat_boot_kdi_init(void)
65 hat_kdi_page
= (uintptr_t)kbm_push(0); /* first call gets address... */
69 * Switch to using a page in the kernel's va range for physical memory access.
70 * We need to allocate a virtual page, then permanently map in the page that
71 * contains the PTE to it.
76 /*LINTED:set but not used in function*/
80 * Get an kernel page VA to use for phys mem access. Then make sure
81 * the VA has a page table.
83 hat_kdi_use_pae
= mmu
.pae_hat
;
84 hat_kdi_page
= (uintptr_t)vmem_alloc(heap_arena
, PAGESIZE
, VM_SLEEP
);
85 ht
= htable_create(kas
.a_hat
, hat_kdi_page
, 0, NULL
);
89 * Get an address at which to put the pagetable and devload it.
91 hat_kdi_pte
= vmem_xalloc(heap_arena
, MMU_PAGESIZE
, MMU_PAGESIZE
, 0,
92 0, NULL
, NULL
, VM_SLEEP
);
93 hat_devload(kas
.a_hat
, (caddr_t
)hat_kdi_pte
, MMU_PAGESIZE
, ht
->ht_pfn
,
94 PROT_READ
| PROT_WRITE
| HAT_NOSYNC
| HAT_UNORDERED_OK
,
95 HAT_LOAD
| HAT_LOAD_NOCONSIST
);
97 PT_INDEX_PTR(hat_kdi_pte
, htable_va2entry(hat_kdi_page
, ht
));
99 HTABLE_INC(ht
->ht_valid_cnt
);
/*
 * Without a hypervisor in the picture, "machine" addresses and physical
 * addresses are the same thing, so these conversions are identities.
 */
#define	kdi_mtop(m)	(m)
#define	kdi_ptom(p)	(p)
108 kdi_vtop(uintptr_t va
, uint64_t *pap
)
110 uintptr_t vaddr
= va
;
119 * if the mmu struct isn't relevant yet, we need to probe
120 * the boot loader's pagetables.
123 if (kbm_probe(&vaddr
, &len
, &pfn
, &prot
) == 0)
128 pfn
+= mmu_btop(va
- vaddr
);
129 *pap
= pfn_to_pa(pfn
) + (vaddr
& MMU_PAGEOFFSET
);
134 * We can't go through normal hat routines, so we'll use
135 * kdi_pread() to walk the page tables
137 *pap
= getcr3() & MMU_PAGEMASK
;
138 for (level
= mmu
.max_level
; ; --level
) {
139 index
= (va
>> LEVEL_SHIFT(level
)) & (mmu
.ptes_per_table
- 1);
140 *pap
+= index
<< mmu
.pte_size_shift
;
142 if (kdi_pread((caddr_t
)&pte
, mmu
.pte_size
, *pap
, &len
) != 0)
146 if (level
> 0 && level
<= mmu
.max_page_level
&&
147 (pte
& PT_PAGESIZE
)) {
148 *pap
= kdi_mtop(pte
& PT_PADDR_LGPG
);
151 *pap
= kdi_mtop(pte
& PT_PADDR
);
156 *pap
+= va
& LEVEL_OFFSET(level
);
161 kdi_prw(caddr_t buf
, size_t nbytes
, uint64_t pa
, size_t *ncopiedp
, int doread
)
172 * if this is called before any initialization - fail
174 if (hat_kdi_page
== 0)
179 * figure out the addresses and construct a minimal PTE
181 pgoff
= pa
& MMU_PAGEOFFSET
;
182 sz
= MIN(nbytes
, MMU_PAGESIZE
- pgoff
);
183 va
= (caddr_t
)hat_kdi_page
+ pgoff
;
184 pte
= kdi_ptom(mmu_ptob(mmu_btop(pa
))) | PT_VALID
;
189 PTE_SET(pte
, PT_WRITABLE
);
195 * map the physical page
199 else if (hat_kdi_use_pae
)
202 *(x86pte32_t
*)hat_kdi_pte
= pte
;
203 mmu_tlbflush_entry((caddr_t
)hat_kdi_page
);
212 else if (hat_kdi_use_pae
)
215 *(x86pte32_t
*)hat_kdi_pte
= 0;
216 mmu_tlbflush_entry((caddr_t
)hat_kdi_page
);
232 kdi_pread(caddr_t buf
, size_t nbytes
, uint64_t addr
, size_t *ncopiedp
)
234 return (kdi_prw(buf
, nbytes
, addr
, ncopiedp
, 1));
238 kdi_pwrite(caddr_t buf
, size_t nbytes
, uint64_t addr
, size_t *ncopiedp
)
240 return (kdi_prw(buf
, nbytes
, addr
, ncopiedp
, 0));
245 * Return the number of bytes, relative to the beginning of a given range, that
246 * are non-toxic (can be read from and written to with relative impunity).
250 kdi_range_is_nontoxic(uintptr_t va
, size_t sz
, int write
)
253 extern uintptr_t toxic_addr
;
254 extern size_t toxic_size
;
257 * Check 64 bit toxic range.
259 if (toxic_addr
!= 0 &&
260 va
+ sz
>= toxic_addr
&&
261 va
< toxic_addr
+ toxic_size
)
262 return (va
< toxic_addr
? toxic_addr
- va
: 0);
265 * avoid any Virtual Address hole
267 if (va
+ sz
>= hole_start
&& va
< hole_end
)
268 return (va
< hole_start
? hole_start
- va
: 0);
272 #elif defined(__i386)
273 extern void *device_arena_contains(void *, size_t, size_t *);
276 v
= (uintptr_t)device_arena_contains((void *)va
, sz
, NULL
);