/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "panic.h"
#include "qemu-common.h"
#include "cpu.h"
#include "x86.h"
#include "x86_mmu.h"
#include "vmcs.h"
#include "vmx.h"
#include "exec/address-spaces.h"

#define pte_present(pte) (pte & PT_PRESENT)
#define pte_write_access(pte) (pte & PT_WRITE)
#define pte_user_access(pte) (pte & PT_USER)
#define pte_exec_access(pte) (!(pte & PT_NX))

#define pte_large_page(pte) (pte & PT_PS)
#define pte_global_access(pte) (pte & PT_GLOBAL)

/* masks for the page-table root held in CR3 */
#define PAE_CR3_MASK                (~0x1fllu)
#define LEGACY_CR3_MASK             (0xffffffff)

/* frame-address masks; PAE-format entries can carry up to 52 physical bits */
#define LEGACY_PTE_PAGE_MASK        (0xffffffffllu << 12)
#define PAE_PTE_PAGE_MASK           ((-1llu << 12) & ((1llu << 52) - 1))
#define PAE_PTE_LARGE_PAGE_MASK     ((-1llu << 21) & ((1llu << 52) - 1))
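
/*
 * State for one guest page-table walk: the guest-virtual address in, the
 * guest-physical address out, the entry read at each level of the walk,
 * and the page-fault-style flags accumulated along the way.
 */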
struct gpt_translation {
    target_ulong  gva;
    uint64_t gpa;
    int    err_code;
    uint64_t pte[5];
    bool write_access;
    bool user_access;
    bool exec_access;
};

static int gpt_top_level(struct CPUState *cpu, bool pae)
{
    if (!pae) {
        return 2;
    }
    if (x86_is_long_mode(cpu)) {
        return 4;
    }

    /* legacy PAE: a 4-entry PDPT on top of the 2-level walk */
    return 3;
}
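
/*
 * Index of gva's entry within the table at the given walk level: each
 * PAE/long-mode table is indexed by 9 address bits, each legacy table by
 * 10. E.g. in a 4-level long-mode walk, level 4 extracts gva bits 47:39
 * (shift = 9 * 3 + 12) and level 1 extracts bits 20:12.
 */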
static inline int gpt_entry(target_ulong addr, int level, bool pae)
{
    int level_shift = pae ? 9 : 10;
    return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
}

/* legacy entries are 4 bytes, PAE/long-mode entries 8 */
static inline int pte_size(bool pae)
{
    return pae ? 8 : 4;
}
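
/*
 * Fetch the next-lower table entry from guest memory: mask the current
 * entry down to its frame address, add this level's index of the gva, and
 * read pte_size() bytes from the resulting guest-physical address into
 * pt->pte[level - 1].
 */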
static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                         int level, bool pae)
{
    int index;
    uint64_t pte = 0;
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
    uint64_t gpa = pt->pte[level] & page_mask;

    if (level == 3 && !x86_is_long_mode(cpu)) {
        /* legacy-PAE PDPTEs are used as-is, without masking */
        gpa = pt->pte[level];
    }

    index = gpt_entry(pt->gva, level, pae);
    address_space_read(&address_space_memory, gpa + index * pte_size(pae),
                       MEMTXATTRS_UNSPECIFIED, &pte, pte_size(pae));

    pt->pte[level - 1] = pte;

    return true;
}
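
/*
 * The MMU_PAGE_* flags record the access type (write, user, instruction
 * fetch) in the style of the x86 page-fault error code. Note that when
 * CR0.WP is clear, supervisor-mode writes may ignore a page's R/W bit,
 * which is why the write-permission check below is gated on CR0.WP.
 */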
/* test page table entry */
static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                          int level, bool *is_large, bool pae)
{
    uint64_t pte = pt->pte[level];

    if (pt->write_access) {
        pt->err_code |= MMU_PAGE_WT;
    }
    if (pt->user_access) {
        pt->err_code |= MMU_PAGE_US;
    }
    if (pt->exec_access) {
        pt->err_code |= MMU_PAGE_NX;
    }

    if (!pte_present(pte)) {
        return false;
    }

    /* legacy-PAE PDPTEs carry no permission bits to check */
    if (pae && !x86_is_long_mode(cpu) && 2 == level) {
        goto exit;
    }

    if (1 == level && pte_large_page(pte)) {
        pt->err_code |= MMU_PAGE_PT;
        *is_large = true;
    }
    if (!level) {
        pt->err_code |= MMU_PAGE_PT;
    }

    uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    /* check protection */
    if (cr0 & CR0_WP) {
        if (pt->write_access && !pte_write_access(pte)) {
            return false;
        }
    }

    if (pt->user_access && !pte_user_access(pte)) {
        return false;
    }

    if (pae && pt->exec_access && !pte_exec_access(pte)) {
        return false;
    }

exit:
    /* TODO: check reserved bits */
    return true;
}
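
/*
 * PSE-36: a 4MB page-directory entry stores physical-address bits 39:32
 * in PDE bits 20:13. (pte & 0x1fe000) << 19 moves bits 20:13 up to 39:32,
 * and (pte & 0xffc00000) keeps bits 31:22, together yielding the
 * 4MB-aligned page base.
 */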
static inline uint64_t pse_pte_to_page(uint64_t pte)
{
    return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
}
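
/*
 * pt->pte[1] is the level-1 entry, i.e. the page-directory entry that was
 * flagged as a large page: PAE large pages span 2MB (21 offset bits),
 * legacy PSE pages 4MB (22 offset bits).
 */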
static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
{
    VM_PANIC_ON(!pte_large_page(pt->pte[1]))

    /* 2MB large page */
    if (pae) {
        return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
    }

    /* 4MB large page */
    return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);
}
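
/*
 * Walk the guest page tables for addr. pt->pte[top_level] is seeded with
 * the table root taken from CR3, and each iteration stores the entry read
 * at the current level into pt->pte[level - 1], so pt->pte[0] ends up
 * holding the final 4KB PTE unless a large page cuts the walk short. The
 * err_code argument seeds the access type that the permission checks in
 * test_pt_entry() are run against.
 */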
static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
                     struct gpt_translation *pt, bool pae)
{
    int top_level, level;
    bool is_large = false;
    target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;

    memset(pt, 0, sizeof(*pt));
    top_level = gpt_top_level(cpu, pae);

    pt->pte[top_level] = pae ? (cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);
    pt->gva = addr;
    pt->user_access = (err_code & MMU_PAGE_US);
    pt->write_access = (err_code & MMU_PAGE_WT);
    pt->exec_access = (err_code & MMU_PAGE_NX);

    for (level = top_level; level > 0; level--) {
        get_pt_entry(cpu, pt, level, pae);

        if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
            return false;
        }

        if (is_large) {
            break;
        }
    }

    if (!is_large) {
        pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
    } else {
        pt->gpa = large_page_gpa(pt, pae);
    }

    return true;
}
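
/*
 * Translate a guest-virtual address to a guest-physical one, walking the
 * guest page tables when paging is enabled. A hypothetical caller (an
 * illustration only, not code from this file) would look like:
 *
 *     uint64_t gpa;
 *     if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
 *         ... inject a page fault into the guest ...
 *     }
 */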
bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa)
{
    bool res;
    struct gpt_translation pt;
    int err_code = 0;

    if (!x86_is_paging_mode(cpu)) {
        /* paging disabled: gva and gpa are identical */
        *gpa = gva;
        return true;
    }

    res = walk_gpt(cpu, gva, err_code, &pt, x86_is_pae_enabled(cpu));
    if (res) {
        *gpa = pt.gpa;
        return true;
    }

    return false;
}
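
/*
 * Copy to guest memory page by page, since contiguous gvas may map to
 * non-contiguous gpas. For example, an 8-byte write at gva 0x10ffc first
 * copies MIN(8, 0x1000 - 0xffc) = 4 bytes, then translates gva 0x11000
 * separately for the remaining 4.
 */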
void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes)
{
    uint64_t gpa;

    while (bytes > 0) {
        /* copy at most up to the end of the current page */
        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));

        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
            VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
        } else {
            address_space_write(&address_space_memory, gpa,
                                MEMTXATTRS_UNSPECIFIED, data, copy);
        }

        bytes -= copy;
        gva += copy;
        data += copy;
    }
}
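
/*
 * Read side of the same page-by-page loop. Unlike the write path there is
 * no else branch after the panic; presumably VM_PANIC_EX() does not
 * return, so the address_space_read() below is only reached when the
 * translation succeeded.
 */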
void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes)
{
    uint64_t gpa;

    while (bytes > 0) {
        /* copy at most up to the end of the current page */
        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));

        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
            VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
        }
        address_space_read(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
                           data, copy);

        bytes -= copy;
        gva += copy;
        data += copy;
    }
}
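
/*
 * Typical use from an instruction-emulation path (a sketch under assumed
 * names; insn_buf and the gvas are hypothetical, not part of this file):
 *
 *     uint8_t insn_buf[15];
 *     vmx_read_mem(cpu, insn_buf, rip_gva, sizeof(insn_buf));
 *     ...emulate...
 *     vmx_write_mem(cpu, dst_gva, &result, sizeof(result));
 */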