/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#include "panic.h"               /* VM_PANIC_ON(), VM_PANIC_EX() */
#include "x86.h"                 /* x86_is_long_mode(), PT_* flag bits */
#include "x86_mmu.h"             /* MMU_PAGE_* error-code bits */
#include "vmcs.h"                /* VMCS_GUEST_CR0 / VMCS_GUEST_CR3 */
#include "vmx.h"                 /* rvmcs() */
#include "exec/address-spaces.h"
#define pte_present(pte) (pte & PT_PRESENT)
#define pte_write_access(pte) (pte & PT_WRITE)
#define pte_user_access(pte) (pte & PT_USER)
#define pte_exec_access(pte) (!(pte & PT_NX))

#define pte_large_page(pte) (pte & PT_PS)
#define pte_global_access(pte) (pte & PT_GLOBAL)
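
/*
 * CR3 and page-frame masks. PAE-format entries can carry physical address
 * bits up to bit 51 (the architectural 52-bit limit), hence the
 * ((1llu << 52) - 1) cap; the low 12 bits (21 for a 2MB large page) hold
 * the flags and page offset.
 */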
#define PAE_CR3_MASK                (~0x1fllu)
#define LEGACY_CR3_MASK             (0xffffffff)

#define LEGACY_PTE_PAGE_MASK        (0xffffffffllu << 12)
#define PAE_PTE_PAGE_MASK           ((-1llu << 12) & ((1llu << 52) - 1))
#define PAE_PTE_LARGE_PAGE_MASK     ((-1llu << (21)) & ((1llu << 52) - 1))
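
/*
 * State for a single guest page-table walk: gva holds the address being
 * translated, gpa receives the result, err_code accumulates page-fault
 * error-code bits, and pte[] keeps the entry fetched at each level, with
 * the CR3-derived root at the top index and the final PTE at index 0.
 */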
struct gpt_translation {
    target_ulong gva;
    uint64_t gpa;
    int err_code;
    uint64_t pte[5];
    bool write_access;
    bool user_access;
    bool exec_access;
};
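
/*
 * Number of paging levels, which is also the pt->pte[] index of the root:
 * 2 for legacy 32-bit paging, 3 for PAE, 4 for long mode.
 */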
static int gpt_top_level(struct CPUState *cpu, bool pae)
{
    if (!pae) {
        return 2;
    }
    if (x86_is_long_mode(cpu)) {
        return 4;
    }

    /* PAE */
    return 3;
}
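
/*
 * Index of gva's entry in the table at 'level': each level decodes 9 bits
 * of the address under PAE (512 8-byte entries per 4KB table) and 10 bits
 * under legacy paging (1024 4-byte entries), above the 12-bit page offset.
 */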
static inline int gpt_entry(target_ulong addr, int level, bool pae)
{
    int level_shift = pae ? 9 : 10;
    return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
}
static inline int pte_size(bool pae)
{
    /* PAE-format entries are 64-bit, legacy entries 32-bit */
    return pae ? 8 : 4;
}
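
/*
 * Fetch the entry one level below: index the table at pt->pte[level] with
 * the gva bits for that level and store the result in pt->pte[level - 1].
 * The 32-bit PAE PDPT (level 3 outside long mode) is special-cased because
 * its base from CR3 is only 32-byte aligned, so the 4KB page mask must not
 * be applied to it.
 */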
static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                         int level, bool pae)
{
    int index;
    uint64_t pte = 0;
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
    uint64_t gpa = pt->pte[level] & page_mask;

    if (level == 3 && !x86_is_long_mode(cpu)) {
        gpa = pt->pte[level];
    }

    index = gpt_entry(pt->gva, level, pae);
    address_space_rw(&address_space_memory, gpa + index * pte_size(pae),
                     MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);

    pt->pte[level - 1] = pte;

    return true;
}
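
/*
 * The MMU_PAGE_* flags serve double duty as page-fault error-code bits
 * and as the description of the attempted access: walk_gpt() seeds the
 * write/user/exec flags from the incoming err_code, and the check below
 * folds them back into pt->err_code while validating each level.
 */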
/* test page table entry */
static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                          int level, bool *is_large, bool pae)
{
    uint64_t pte = pt->pte[level];

    if (pt->write_access) {
        pt->err_code |= MMU_PAGE_WT;
    }
    if (pt->user_access) {
        pt->err_code |= MMU_PAGE_US;
    }
    if (pt->exec_access) {
        pt->err_code |= MMU_PAGE_NX;
    }

    if (!pte_present(pte)) {
        return false;
    }

    /* PAE PDPTEs (outside long mode) carry no protection bits to test */
    if (pae && !x86_is_long_mode(cpu) && 2 == level) {
        goto exit;
    }

    if (1 == level && pte_large_page(pte)) {
        pt->err_code |= MMU_PAGE_PT;
        *is_large = true;
    }
    if (!level) {
        pt->err_code |= MMU_PAGE_PT;
    }

    uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    /* check protection */
    if (cr0 & CR0_WP) {
        if (pt->write_access && !pte_write_access(pte)) {
            return false;
        }
    }

    if (pt->user_access && !pte_user_access(pte)) {
        return false;
    }

    if (pae && pt->exec_access && !pte_exec_access(pte)) {
        return false;
    }

exit:
    /* TODO: check reserved bits */
    return true;
}
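
/*
 * PSE-36: a legacy 4MB page-directory entry stores physical address bits
 * 32..39 in PDE bits 13..20; shifting those up by 19 places them above the
 * bits taken directly from PDE bits 22..31.
 */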
static inline uint64_t pse_pte_to_page(uint64_t pte)
{
    return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
}
static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
{
    VM_PANIC_ON(!pte_large_page(pt->pte[1]))

    /* 2MB large page */
    if (pae) {
        return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
    }

    /* 4MB large page */
    return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);
}
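
/*
 * Walk the guest page table for 'addr' from the CR3 root down to the
 * final PTE, or to a large-page entry, checking every level against the
 * access described by err_code. On success pt->gpa holds the translation.
 */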
static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
                     struct gpt_translation *pt, bool pae)
{
    int top_level, level;
    bool is_large = false;
    target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;

    memset(pt, 0, sizeof(*pt));
    top_level = gpt_top_level(cpu, pae);

    pt->pte[top_level] = pae ? (cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);
    pt->gva = addr;
    pt->user_access = (err_code & MMU_PAGE_US);
    pt->write_access = (err_code & MMU_PAGE_WT);
    pt->exec_access = (err_code & MMU_PAGE_NX);

    for (level = top_level; level > 0; level--) {
        get_pt_entry(cpu, pt, level, pae);

        if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
            return false;
        }

        if (is_large) {
            break;
        }
    }

    if (!is_large) {
        pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
    } else {
        pt->gpa = large_page_gpa(pt, pae);
    }

    return true;
}
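
/*
 * Translate a guest virtual address into a guest physical address. With
 * paging disabled the mapping is the identity; otherwise the guest tables
 * are walked in whichever format (legacy, PAE or long mode) is enabled.
 */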
bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa)
{
    bool res;
    struct gpt_translation pt;
    int err_code = 0;

    if (!x86_is_paging_mode(cpu)) {
        *gpa = gva;
        return true;
    }

    res = walk_gpt(cpu, gva, err_code, &pt, x86_is_pae_enabled(cpu));
    if (res) {
        *gpa = pt.gpa;
        return true;
    }

    return false;
}
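
/*
 * Copy 'bytes' of 'data' into guest memory at 'gva', translating and
 * copying at most one 4KB page per iteration, since virtually contiguous
 * pages need not be physically contiguous.
 */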
void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes)
{
    uint64_t gpa;

    while (bytes > 0) {
        /* copy page */
        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));

        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
            VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
        } else {
            address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
                             data, copy, 1);
        }

        bytes -= copy;
        gva += copy;
        data += copy;
    }
}
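
/*
 * Counterpart of vmx_write_mem(): copy guest memory at 'gva' out to
 * 'data', again translating page by page through mmu_gva_to_gpa().
 */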
void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes)
{
    uint64_t gpa;

    while (bytes > 0) {
        /* copy page */
        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));

        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
            VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
        }
        address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
                         data, copy, 0);

        bytes -= copy;
        gva += copy;
        data += copy;
    }
}