Merge commit '74ecdb5171c9f3673b9393b1a3dc6f3a65e93895'
[unleashed.git] / arch / x86 / kernel / platform / i86pc / vm / hat_kdi.c
blob8b59afd58e6491047b7edf4f6042aeff7c914133
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */
/*
 * HAT interfaces used by the kernel debugger to interact with the VM system.
 * These interfaces are invoked when the world is stopped.  As such, no
 * blocking operations may be performed.
 */
35 #include <sys/cpuvar.h>
36 #include <sys/kdi_impl.h>
37 #include <sys/errno.h>
38 #include <sys/systm.h>
39 #include <sys/sysmacros.h>
40 #include <sys/mman.h>
41 #include <sys/bootconf.h>
42 #include <sys/cmn_err.h>
43 #include <vm/seg_kmem.h>
44 #include <vm/hat_i86.h>
45 #include <sys/bootinfo.h>
46 #include <vm/kboot_mmu.h>
47 #include <sys/machsystm.h>
50 * The debugger needs direct access to the PTE of one page table entry
51 * in order to implement vtop and physical read/writes
53 static uintptr_t hat_kdi_page = 0; /* vaddr for phsical page accesses */
54 static uint_t use_kbm = 1;
55 uint_t hat_kdi_use_pae; /* if 0, use x86pte32_t for pte type */
57 static x86pte_t *hat_kdi_pte = NULL; /* vaddr of pte for hat_kdi_page */
60 * Get the address for remapping physical pages during boot
62 void
63 hat_boot_kdi_init(void)
65 hat_kdi_page = (uintptr_t)kbm_push(0); /* first call gets address... */
69 * Switch to using a page in the kernel's va range for physical memory access.
70 * We need to allocate a virtual page, then permanently map in the page that
71 * contains the PTE to it.
73 void
74 hat_kdi_init(void)
76 /*LINTED:set but not used in function*/
77 htable_t *ht __unused;
80 * Get an kernel page VA to use for phys mem access. Then make sure
81 * the VA has a page table.
83 hat_kdi_use_pae = mmu.pae_hat;
84 hat_kdi_page = (uintptr_t)vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
85 ht = htable_create(kas.a_hat, hat_kdi_page, 0, NULL);
86 use_kbm = 0;
89 * Get an address at which to put the pagetable and devload it.
91 hat_kdi_pte = vmem_xalloc(heap_arena, MMU_PAGESIZE, MMU_PAGESIZE, 0,
92 0, NULL, NULL, VM_SLEEP);
93 hat_devload(kas.a_hat, (caddr_t)hat_kdi_pte, MMU_PAGESIZE, ht->ht_pfn,
94 PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
95 HAT_LOAD | HAT_LOAD_NOCONSIST);
96 hat_kdi_pte =
97 PT_INDEX_PTR(hat_kdi_pte, htable_va2entry(hat_kdi_page, ht));
99 HTABLE_INC(ht->ht_valid_cnt);
100 htable_release(ht);
/*
 * On bare metal a machine address and a physical address are the same
 * thing, so the machine<->physical conversions are identity macros.
 */
#define	kdi_mtop(m)	(m)
#define	kdi_ptom(p)	(p)
106 /*ARGSUSED*/
108 kdi_vtop(uintptr_t va, uint64_t *pap)
110 uintptr_t vaddr = va;
111 size_t len;
112 pfn_t pfn;
113 uint_t prot;
114 int level;
115 x86pte_t pte;
116 int index;
119 * if the mmu struct isn't relevant yet, we need to probe
120 * the boot loader's pagetables.
122 if (!khat_running) {
123 if (kbm_probe(&vaddr, &len, &pfn, &prot) == 0)
124 return (ENOENT);
125 if (vaddr > va)
126 return (ENOENT);
127 if (vaddr < va)
128 pfn += mmu_btop(va - vaddr);
129 *pap = pfn_to_pa(pfn) + (vaddr & MMU_PAGEOFFSET);
130 return (0);
134 * We can't go through normal hat routines, so we'll use
135 * kdi_pread() to walk the page tables
137 *pap = getcr3_pa();
138 for (level = mmu.max_level; ; --level) {
139 index = (va >> LEVEL_SHIFT(level)) & (mmu.ptes_per_table - 1);
140 *pap += index << mmu.pte_size_shift;
141 pte = 0;
142 if (kdi_pread((caddr_t)&pte, mmu.pte_size, *pap, &len) != 0)
143 return (ENOENT);
144 if (pte == 0)
145 return (ENOENT);
146 if (level > 0 && level <= mmu.max_page_level &&
147 (pte & PT_PAGESIZE)) {
148 *pap = kdi_mtop(pte & PT_PADDR_LGPG);
149 break;
150 } else {
151 *pap = kdi_mtop(pte & PT_PADDR);
152 if (level == 0)
153 break;
156 *pap += va & LEVEL_OFFSET(level);
157 return (0);
160 static int
161 kdi_prw(caddr_t buf, size_t nbytes, uint64_t pa, size_t *ncopiedp, int doread)
163 size_t ncopied = 0;
164 off_t pgoff;
165 size_t sz;
166 caddr_t va;
167 caddr_t from;
168 caddr_t to;
169 x86pte_t pte;
172 * if this is called before any initialization - fail
174 if (hat_kdi_page == 0)
175 return (EAGAIN);
177 while (nbytes > 0) {
179 * figure out the addresses and construct a minimal PTE
181 pgoff = pa & MMU_PAGEOFFSET;
182 sz = MIN(nbytes, MMU_PAGESIZE - pgoff);
183 va = (caddr_t)hat_kdi_page + pgoff;
184 pte = kdi_ptom(mmu_ptob(mmu_btop(pa))) | PT_VALID;
185 if (doread) {
186 from = va;
187 to = buf;
188 } else {
189 PTE_SET(pte, PT_WRITABLE);
190 from = buf;
191 to = va;
195 * map the physical page
197 if (use_kbm)
198 (void) kbm_push(pa);
199 else if (hat_kdi_use_pae)
200 *hat_kdi_pte = pte;
201 else
202 *(x86pte32_t *)hat_kdi_pte = pte;
203 mmu_flush_tlb_kpage(hat_kdi_page);
205 bcopy(from, to, sz);
208 * erase the mapping
210 if (use_kbm)
211 kbm_pop();
212 else if (hat_kdi_use_pae)
213 *hat_kdi_pte = 0;
214 else
215 *(x86pte32_t *)hat_kdi_pte = 0;
216 mmu_flush_tlb_kpage(hat_kdi_page);
218 buf += sz;
219 pa += sz;
220 nbytes -= sz;
221 ncopied += sz;
224 if (ncopied == 0)
225 return (ENOENT);
227 *ncopiedp = ncopied;
228 return (0);
232 kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
234 return (kdi_prw(buf, nbytes, addr, ncopiedp, 1));
238 kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
240 return (kdi_prw(buf, nbytes, addr, ncopiedp, 0));
243 #if !defined(__xpv)
245 * This gets used for flushing the TLB on all the slaves just prior to doing a
246 * kdi_prw(). It's unclear why this was originally done, since kdi_prw() itself
247 * will flush any lingering hat_kdi_page mappings, but let's presume it was a
248 * good idea.
250 void
251 kdi_flush_caches(void)
253 mmu_flush_tlb(FLUSH_TLB_ALL, NULL);
255 #endif
258 * Return the number of bytes, relative to the beginning of a given range, that
259 * are non-toxic (can be read from and written to with relative impunity).
261 /*ARGSUSED*/
262 size_t
263 kdi_range_is_nontoxic(uintptr_t va, size_t sz, int write)
265 #if defined(__amd64)
266 extern uintptr_t toxic_addr;
267 extern size_t toxic_size;
270 * Check 64 bit toxic range.
272 if (toxic_addr != 0 &&
273 va + sz >= toxic_addr &&
274 va < toxic_addr + toxic_size)
275 return (va < toxic_addr ? toxic_addr - va : 0);
278 * avoid any Virtual Address hole
280 if (va + sz >= hole_start && va < hole_end)
281 return (va < hole_start ? hole_start - va : 0);
283 return (sz);
285 #elif defined(__i386)
286 extern void *device_arena_contains(void *, size_t, size_t *);
287 uintptr_t v;
289 v = (uintptr_t)device_arena_contains((void *)va, sz, NULL);
290 if (v == 0)
291 return (sz);
292 else if (v <= va)
293 return (0);
294 else
295 return (v - va);
297 #endif /* __i386 */