4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright 2018 Joyent, Inc.
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/archsystm.h>
32 #include <sys/debug.h>
33 #include <sys/bootconf.h>
34 #include <sys/bootsvcs.h>
35 #include <sys/bootinfo.h>
37 #include <sys/cmn_err.h>
38 #include <sys/param.h>
39 #include <sys/machparam.h>
40 #include <sys/machsystm.h>
41 #include <sys/promif.h>
43 #include <vm/kboot_mmu.h>
44 #include <vm/hat_pte.h>
45 #include <vm/hat_i86.h>
46 #include <vm/seg_kmem.h>
/*
 * Joe's debug printing
 *
 * NOTE(review): the enabling condition was lost in this copy; the disabled
 * variant is the one in effect — confirm against the repository before
 * flipping it on.
 */
#if 0
#define DBG(x) \
        bop_printf(NULL, "kboot_mmu.c: %s is %" PRIx64 "\n", #x, (uint64_t)(x));
#else
#define DBG(x)  /* naught */
#endif
59 * Page table and memory stuff.
61 static caddr_t window
;
62 static caddr_t pte_to_window
;
65 * this are needed by mmu_init()
67 int kbm_nx_support
= 0; /* NX bit in PTEs is in use */
68 int kbm_pae_support
= 0; /* PAE is 64 bit Page table entries */
69 int kbm_pge_support
= 0; /* PGE is Page table global bit enabled */
70 int kbm_largepage_support
= 0;
71 uint_t kbm_nucleus_size
= 0;
/*
 * Size/offset/mask of the VA range covered by one page table entry at
 * boot paging level "l".  shift_amt[] is selected in kbm_init() based on
 * whether PAE is in use.
 */
#define BOOT_SHIFT(l)   (shift_amt[l])
#define BOOT_SZ(l)      ((size_t)1 << BOOT_SHIFT(l))
#define BOOT_OFFSET(l)  (BOOT_SZ(l) - 1)
#define BOOT_MASK(l)    (~BOOT_OFFSET(l))
79 * Initialize memory management parameters for boot time page table management
82 kbm_init(struct xboot_info
*bi
)
85 * configure mmu information
87 kbm_nucleus_size
= (uintptr_t)bi
->bi_kseg_size
;
88 kbm_largepage_support
= bi
->bi_use_largepage
;
89 kbm_nx_support
= bi
->bi_use_nx
;
90 kbm_pae_support
= bi
->bi_use_pae
;
91 kbm_pge_support
= bi
->bi_use_pge
;
92 window
= bi
->bi_pt_window
;
94 pte_to_window
= bi
->bi_pte_to_pt_window
;
96 if (kbm_pae_support
) {
97 shift_amt
= shift_amt_pae
;
107 shift_amt
= shift_amt_nopae
;
108 ptes_per_table
= 1024;
110 lpagesize
= FOUR_MEG
;
114 top_page_table
= bi
->bi_top_page_table
;
119 * Change the addressible page table window to point at a given page
123 kbm_remap_window(paddr_t physaddr
, int writeable
)
125 x86pte_t pt_bits
= PT_NOCONSIST
| PT_VALID
| PT_WRITABLE
;
130 *((x86pte_t
*)pte_to_window
) = physaddr
| pt_bits
;
132 *((x86pte32_t
*)pte_to_window
) = physaddr
| pt_bits
;
139 * Add a mapping for the physical page at the given virtual address.
142 kbm_map(uintptr_t va
, paddr_t pa
, uint_t level
, uint_t is_kernel
)
145 paddr_t pte_physaddr
;
149 panic("kbm_map() called too late");
151 pteval
= pa_to_ma(pa
) | PT_NOCONSIST
| PT_VALID
| PT_WRITABLE
;
153 pteval
|= PT_PAGESIZE
;
154 if (kbm_pge_support
&& is_kernel
)
159 * Find the pte that will map this address. This creates any
160 * missing intermediate level page tables.
162 ptep
= find_pte(va
, &pte_physaddr
, level
, 0);
164 bop_panic("kbm_map: find_pte returned NULL");
169 *((x86pte32_t
*)ptep
) = pteval
;
170 mmu_invlpg((caddr_t
)va
);
176 * Probe the boot time page tables to find the first mapping
177 * including va (or higher) and return non-zero if one is found.
178 * va is updated to the starting address and len to the pagesize.
179 * pp will be set to point to the 1st page_t of the mapped page(s).
181 * Note that if va is in the middle of a large page, the returned va
182 * will be less than what was asked for.
185 kbm_probe(uintptr_t *va
, size_t *len
, pfn_t
*pfn
, uint_t
*prot
)
189 paddr_t pte_physaddr
;
194 panic("kbm_probe() called too late");
202 if (IN_VA_HOLE(probe_va
))
203 probe_va
= mmu
.hole_end
;
205 if (IN_HYPERVISOR_VA(probe_va
))
206 #if defined(__amd64) && defined(__xpv)
207 probe_va
= HYPERVISOR_VIRT_END
;
213 * If we don't have a valid PTP/PTE at this level
214 * then we can bump VA by this level's pagesize and try again.
215 * When the probe_va wraps around, we are done.
217 ptep
= find_pte(probe_va
, &pte_physaddr
, l
, 1);
219 bop_panic("kbm_probe: find_pte returned NULL");
223 pte_val
= *((x86pte32_t
*)ptep
);
224 if (!PTE_ISVALID(pte_val
)) {
225 probe_va
= (probe_va
& BOOT_MASK(l
)) + BOOT_SZ(l
);
232 * If this entry is a pointer to a lower level page table
235 if (!PTE_ISPAGE(pte_val
, l
)) {
242 * We found a boot level page table entry
245 *va
= probe_va
& ~(*len
- 1);
246 *pfn
= PTE2PFN(pte_val
, l
);
249 *prot
= PROT_READ
| PROT_EXEC
;
250 if (PTE_GET(pte_val
, PT_WRITABLE
))
254 * pt_nx is cleared if processor doesn't support NX bit
256 if (PTE_GET(pte_val
, mmu
.pt_nx
))
265 * Destroy a boot loader page table 4K mapping.
268 kbm_unmap(uintptr_t va
)
271 panic("kbm_unmap() called too late");
275 uint_t probe_only
= 1;
277 ptep
= find_pte(va
, NULL
, level
, probe_only
);
284 *((x86pte32_t
*)ptep
) = 0;
285 mmu_invlpg((caddr_t
)va
);
291 * Change a boot loader page table 4K mapping.
292 * Returns the pfn of the old mapping.
295 kbm_remap(uintptr_t va
, pfn_t pfn
)
299 uint_t probe_only
= 1;
300 x86pte_t pte_val
= pa_to_ma(pfn_to_pa(pfn
)) | PT_WRITABLE
|
301 PT_NOCONSIST
| PT_VALID
;
305 panic("kbm_remap() called too late");
306 ptep
= find_pte(va
, NULL
, level
, probe_only
);
308 bop_panic("kbm_remap: find_pte returned NULL");
313 old_pte
= *((x86pte32_t
*)ptep
);
316 *((x86pte_t
*)ptep
) = pte_val
;
318 *((x86pte32_t
*)ptep
) = pte_val
;
319 mmu_invlpg((caddr_t
)va
);
321 if (!(old_pte
& PT_VALID
) || ma_to_pa(old_pte
) == -1)
322 return (PFN_INVALID
);
323 return (mmu_btop(ma_to_pa(old_pte
)));
328 * Change a boot loader page table 4K mapping to read only.
331 kbm_read_only(uintptr_t va
, paddr_t pa
)
333 x86pte_t pte_val
= pa_to_ma(pa
) |
334 PT_NOCONSIST
| PT_REF
| PT_MOD
| PT_VALID
;
339 ptep
= find_pte(va
, NULL
, level
, 0);
341 bop_panic("kbm_read_only: find_pte returned NULL");
346 *((x86pte32_t
*)ptep
) = pte_val
;
347 mmu_invlpg((caddr_t
)va
);
351 * interfaces for kernel debugger to access physical memory
353 static x86pte_t save_pte
;
358 static int first_time
= 1;
366 save_pte
= *((x86pte_t
*)pte_to_window
);
368 save_pte
= *((x86pte32_t
*)pte_to_window
);
369 return (kbm_remap_window(pa
, 0));
376 *((x86pte_t
*)pte_to_window
) = save_pte
;
378 *((x86pte32_t
*)pte_to_window
) = save_pte
;
383 get_pteval(paddr_t table
, uint_t index
)
385 void *table_ptr
= kbm_remap_window(table
, 0);
388 return (((x86pte_t
*)table_ptr
)[index
]);
389 return (((x86pte32_t
*)table_ptr
)[index
]);
393 set_pteval(paddr_t table
, uint_t index
, uint_t level
, x86pte_t pteval
)
395 void *table_ptr
= kbm_remap_window(table
, 0);
397 ((x86pte_t
*)table_ptr
)[index
] = pteval
;
399 ((x86pte32_t
*)table_ptr
)[index
] = pteval
;
400 if (level
== top_level
&& level
== 2)
405 make_ptable(x86pte_t
*pteval
, uint_t level
)
410 new_table
= do_bop_phys_alloc(MMU_PAGESIZE
, MMU_PAGESIZE
);
411 table_ptr
= kbm_remap_window(new_table
, 1);
412 bzero(table_ptr
, MMU_PAGESIZE
);
414 if (level
== top_level
&& level
== 2)
415 *pteval
= pa_to_ma(new_table
) | PT_VALID
;
417 *pteval
= pa_to_ma(new_table
) |
418 PT_VALID
| PT_REF
| PT_USER
| PT_WRITABLE
;
424 map_pte(paddr_t table
, uint_t index
)
426 void *table_ptr
= kbm_remap_window(table
, 0);
427 return ((x86pte_t
*)((caddr_t
)table_ptr
+ index
* pte_size
));