i386: hvf: cleanup x86_gen.h
[qemu/rayw.git] / target / i386 / hvf / x86_mmu.c
blob5c1f35acd067c39212d7f444c1c096164af69915
1 /*
2 * Copyright (C) 2016 Veertu Inc,
3 * Copyright (C) 2017 Google Inc,
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
18 #include "qemu/osdep.h"
19 #include "panic.h"
21 #include "qemu-common.h"
22 #include "cpu.h"
23 #include "x86.h"
24 #include "x86_mmu.h"
25 #include "string.h"
26 #include "vmcs.h"
27 #include "vmx.h"
29 #include "memory.h"
30 #include "exec/address-spaces.h"
/* Accessors for individual x86 page-table-entry flag bits */
#define pte_present(pte) (pte & PT_PRESENT)
#define pte_write_access(pte) (pte & PT_WRITE)
#define pte_user_access(pte) (pte & PT_USER)
#define pte_exec_access(pte) (!(pte & PT_NX))   /* NX clear => executable */

#define pte_large_page(pte) (pte & PT_PS)
#define pte_global_access(pte) (pte & PT_GLOBAL)

/* Masks applied to CR3 to obtain the top-level table base */
#define PAE_CR3_MASK (~0x1fllu)
#define LEGACY_CR3_MASK (0xffffffff)

/* Masks extracting the page-frame base from an entry
 * (PAE entries hold physical bits up to bit 51) */
#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
/* State carried through a single guest page-table walk */
struct gpt_translation {
    target_ulong gva;   /* guest virtual address being translated */
    uint64_t gpa;       /* resulting guest physical address */
    int err_code;       /* accumulated MMU_PAGE_* access/fault flags */
    uint64_t pte[5];    /* entry read at each level; [top level] seeded from CR3 */
    bool write_access;  /* access is a write */
    bool user_access;   /* access originates from user mode */
    bool exec_access;   /* access is an instruction fetch */
};
/*
 * Number of page-table levels for the current guest paging mode:
 * 2 for legacy 32-bit paging, 3 for PAE, 4 for long mode.
 */
static int gpt_top_level(struct CPUState *cpu, bool pae)
{
    if (!pae) {
        return 2;
    }
    return x86_is_long_mode(cpu) ? 4 : 3;
}
69 static inline int gpt_entry(target_ulong addr, int level, bool pae)
71 int level_shift = pae ? 9 : 10;
72 return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
/* Size in bytes of one page-table entry: 8 with PAE, 4 for legacy paging. */
static inline int pte_size(bool pae)
{
    if (pae) {
        return 8;
    }
    return 4;
}
/*
 * Load the next-lower page-table entry for pt->gva.
 *
 * pt->pte[level] holds the current table base; the entry index is
 * derived from pt->gva.  The entry read from guest memory is stored
 * into pt->pte[level - 1].  Always returns true.
 */
static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                         int level, bool pae)
{
    int index;
    uint64_t pte = 0;
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
    uint64_t gpa = pt->pte[level] & page_mask;

    if (level == 3 && !x86_is_long_mode(cpu)) {
        /* PAE without long mode: level 3 holds the CR3-derived PDPT base,
         * already masked with PAE_CR3_MASK — don't apply the PTE mask */
        gpa = pt->pte[level];
    }

    index = gpt_entry(pt->gva, level, pae);
    /* final argument 0 = read from guest memory */
    address_space_rw(&address_space_memory, gpa + index * pte_size(pae),
                     MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);

    pt->pte[level - 1] = pte;

    return true;
}
/* test page table entry */
/*
 * Validate pt->pte[level] against the requested access.
 *
 * Returns false if the entry is not present or the access violates its
 * protection bits, true otherwise.  pt->err_code accumulates MMU_PAGE_*
 * flags describing the attempted access, and *is_large is set when a
 * large-page mapping terminates the walk early.
 */
static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                          int level, bool *is_large, bool pae)
{
    uint64_t pte = pt->pte[level];

    /* Record the access type up front so a failed walk still reports
     * what kind of access was attempted */
    if (pt->write_access) {
        pt->err_code |= MMU_PAGE_WT;
    }
    if (pt->user_access) {
        pt->err_code |= MMU_PAGE_US;
    }
    if (pt->exec_access) {
        pt->err_code |= MMU_PAGE_NX;
    }

    if (!pte_present(pte)) {
        return false;
    }

    /* PAE PDPTEs (level 2, non-long-mode) carry no access bits to test */
    if (pae && !x86_is_long_mode(cpu) && 2 == level) {
        goto exit;
    }

    if (1 == level && pte_large_page(pte)) {
        pt->err_code |= MMU_PAGE_PT;
        *is_large = true;
    }
    if (!level) {
        pt->err_code |= MMU_PAGE_PT;
    }

    uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    /* check protection */
    if (cr0 & CR0_WP) {
        /* write-protect enforced: supervisor writes honor the W bit too */
        if (pt->write_access && !pte_write_access(pte)) {
            return false;
        }
    }

    if (pt->user_access && !pte_user_access(pte)) {
        return false;
    }

    /* NX is only meaningful with PAE-format entries */
    if (pae && pt->exec_access && !pte_exec_access(pte)) {
        return false;
    }

exit:
    /* TODO: check reserved bits */
    return true;
}
/*
 * PSE-36: assemble a 4MB page base from a PDE.  Bits 13..20 of the
 * entry provide physical address bits 32..39; bits 22..31 are the low
 * part of the frame address.
 */
static inline uint64_t pse_pte_to_page(uint64_t pte)
{
    uint64_t high_bits = (pte & 0x1fe000) << 19;
    uint64_t low_bits = pte & 0xffc00000;

    return high_bits | low_bits;
}
160 static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
162 VM_PANIC_ON(!pte_large_page(pt->pte[1]))
163 /* 2Mb large page */
164 if (pae) {
165 return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
168 /* 4Mb large page */
169 return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);
/*
 * Walk the guest page tables for 'addr'.
 *
 * err_code carries the MMU_PAGE_* flags (write/user/exec) of the
 * attempted access.  Returns true on success with pt->gpa holding the
 * translated guest-physical address; returns false on a failed walk,
 * with pt->err_code describing the fault.
 */
static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
                     struct gpt_translation *pt, bool pae)
{
    int top_level, level;
    bool is_large = false;
    target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;

    memset(pt, 0, sizeof(*pt));
    top_level = gpt_top_level(cpu, pae);

    /* Seed the walk: pte[top_level] is the top table base taken from CR3 */
    pt->pte[top_level] = pae ? (cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);
    pt->gva = addr;
    pt->user_access = (err_code & MMU_PAGE_US);
    pt->write_access = (err_code & MMU_PAGE_WT);
    pt->exec_access = (err_code & MMU_PAGE_NX);

    for (level = top_level; level > 0; level--) {
        /* loads the entry into pt->pte[level - 1] ... */
        get_pt_entry(cpu, pt, level, pae);

        /* ... which is then validated at 'level - 1' */
        if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
            return false;
        }

        if (is_large) {
            break;
        }
    }

    if (!is_large) {
        pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
    } else {
        pt->gpa = large_page_gpa(pt, pae);
    }

    return true;
}
213 bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa)
215 bool res;
216 struct gpt_translation pt;
217 int err_code = 0;
219 if (!x86_is_paging_mode(cpu)) {
220 *gpa = gva;
221 return true;
224 res = walk_gpt(cpu, gva, err_code, &pt, x86_is_pae_enabled(cpu));
225 if (res) {
226 *gpa = pt.gpa;
227 return true;
230 return false;
233 void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes)
235 uint64_t gpa;
237 while (bytes > 0) {
238 /* copy page */
239 int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
241 if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
242 VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
243 } else {
244 address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
245 data, copy, 1);
248 bytes -= copy;
249 gva += copy;
250 data += copy;
254 void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes)
256 uint64_t gpa;
258 while (bytes > 0) {
259 /* copy page */
260 int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
262 if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
263 VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
265 address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
266 data, copy, 0);
268 bytes -= copy;
269 gva += copy;
270 data += copy;