/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
/* Guest page-table entry flag predicates (PT_* bit definitions come from
 * the x86 headers this file includes). */
#define pte_present(pte) (pte & PT_PRESENT)
#define pte_write_access(pte) (pte & PT_WRITE)
#define pte_user_access(pte) (pte & PT_USER)
/* NX is inverted: the bit being SET means "no execute". */
#define pte_exec_access(pte) (!(pte & PT_NX))

#define pte_large_page(pte) (pte & PT_PS)
#define pte_global_access(pte) (pte & PT_GLOBAL)

/* CR3 base masks: PAE CR3 ignores the low 5 bits; legacy CR3 is 32-bit. */
#define PAE_CR3_MASK (~0x1fllu)
#define LEGACY_CR3_MASK (0xffffffff)

/* Masks extracting the page-frame base address from an entry.
 * PAE forms are capped at a 52-bit maximum physical address. */
#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
42 struct gpt_translation
{
52 static int gpt_top_level(CPUState
*cpu
, bool pae
)
57 if (x86_is_long_mode(cpu
)) {
64 static inline int gpt_entry(target_ulong addr
, int level
, bool pae
)
66 int level_shift
= pae
? 9 : 10;
67 return (addr
>> (level_shift
* (level
- 1) + 12)) & ((1 << level_shift
) - 1);
/*
 * Size in bytes of one guest page-table entry:
 * 8 for PAE/long-mode formats, 4 for legacy 32-bit paging.
 */
static inline int pte_size(bool pae)
{
    if (pae) {
        return 8;
    }
    return 4;
}
76 static bool get_pt_entry(CPUState
*cpu
, struct gpt_translation
*pt
,
81 uint64_t page_mask
= pae
? PAE_PTE_PAGE_MASK
: LEGACY_PTE_PAGE_MASK
;
82 uint64_t gpa
= pt
->pte
[level
] & page_mask
;
84 if (level
== 3 && !x86_is_long_mode(cpu
)) {
88 index
= gpt_entry(pt
->gva
, level
, pae
);
89 address_space_read(&address_space_memory
, gpa
+ index
* pte_size(pae
),
90 MEMTXATTRS_UNSPECIFIED
, &pte
, pte_size(pae
));
92 pt
->pte
[level
- 1] = pte
;
97 /* test page table entry */
98 static bool test_pt_entry(CPUState
*cpu
, struct gpt_translation
*pt
,
99 int level
, bool *is_large
, bool pae
)
101 uint64_t pte
= pt
->pte
[level
];
103 if (pt
->write_access
) {
104 pt
->err_code
|= MMU_PAGE_WT
;
106 if (pt
->user_access
) {
107 pt
->err_code
|= MMU_PAGE_US
;
109 if (pt
->exec_access
) {
110 pt
->err_code
|= MMU_PAGE_NX
;
113 if (!pte_present(pte
)) {
117 if (pae
&& !x86_is_long_mode(cpu
) && 2 == level
) {
121 if (1 == level
&& pte_large_page(pte
)) {
122 pt
->err_code
|= MMU_PAGE_PT
;
126 pt
->err_code
|= MMU_PAGE_PT
;
129 uint32_t cr0
= rvmcs(cpu
->accel
->fd
, VMCS_GUEST_CR0
);
130 /* check protection */
131 if (cr0
& CR0_WP_MASK
) {
132 if (pt
->write_access
&& !pte_write_access(pte
)) {
137 if (pt
->user_access
&& !pte_user_access(pte
)) {
141 if (pae
&& pt
->exec_access
&& !pte_exec_access(pte
)) {
146 /* TODO: check reserved bits */
/*
 * PSE-36: assemble a 4 MB page base from a PDE.  Bits 13-20 of the
 * entry supply physical address bits 32-39; bits 22-31 are the low base.
 */
static inline uint64_t pse_pte_to_page(uint64_t pte)
{
    uint64_t high_bits = (pte & 0x1fe000) << 19;
    uint64_t low_base = pte & 0xffc00000;

    return high_bits | low_base;
}
155 static inline uint64_t large_page_gpa(struct gpt_translation
*pt
, bool pae
)
157 VM_PANIC_ON(!pte_large_page(pt
->pte
[1]))
160 return (pt
->pte
[1] & PAE_PTE_LARGE_PAGE_MASK
) | (pt
->gva
& 0x1fffff);
164 return pse_pte_to_page(pt
->pte
[1]) | (pt
->gva
& 0x3fffff);
169 static bool walk_gpt(CPUState
*cpu
, target_ulong addr
, int err_code
,
170 struct gpt_translation
*pt
, bool pae
)
172 int top_level
, level
;
173 bool is_large
= false;
174 target_ulong cr3
= rvmcs(cpu
->accel
->fd
, VMCS_GUEST_CR3
);
175 uint64_t page_mask
= pae
? PAE_PTE_PAGE_MASK
: LEGACY_PTE_PAGE_MASK
;
177 memset(pt
, 0, sizeof(*pt
));
178 top_level
= gpt_top_level(cpu
, pae
);
180 pt
->pte
[top_level
] = pae
? (cr3
& PAE_CR3_MASK
) : (cr3
& LEGACY_CR3_MASK
);
182 pt
->user_access
= (err_code
& MMU_PAGE_US
);
183 pt
->write_access
= (err_code
& MMU_PAGE_WT
);
184 pt
->exec_access
= (err_code
& MMU_PAGE_NX
);
186 for (level
= top_level
; level
> 0; level
--) {
187 get_pt_entry(cpu
, pt
, level
, pae
);
189 if (!test_pt_entry(cpu
, pt
, level
- 1, &is_large
, pae
)) {
199 pt
->gpa
= (pt
->pte
[0] & page_mask
) | (pt
->gva
& 0xfff);
201 pt
->gpa
= large_page_gpa(pt
, pae
);
208 bool mmu_gva_to_gpa(CPUState
*cpu
, target_ulong gva
, uint64_t *gpa
)
211 struct gpt_translation pt
;
214 if (!x86_is_paging_mode(cpu
)) {
219 res
= walk_gpt(cpu
, gva
, err_code
, &pt
, x86_is_pae_enabled(cpu
));
228 void vmx_write_mem(CPUState
*cpu
, target_ulong gva
, void *data
, int bytes
)
234 int copy
= MIN(bytes
, 0x1000 - (gva
& 0xfff));
236 if (!mmu_gva_to_gpa(cpu
, gva
, &gpa
)) {
237 VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__
, gva
);
239 address_space_write(&address_space_memory
, gpa
,
240 MEMTXATTRS_UNSPECIFIED
, data
, copy
);
249 void vmx_read_mem(CPUState
*cpu
, void *data
, target_ulong gva
, int bytes
)
255 int copy
= MIN(bytes
, 0x1000 - (gva
& 0xfff));
257 if (!mmu_gva_to_gpa(cpu
, gva
, &gpa
)) {
258 VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__
, gva
);
260 address_space_read(&address_space_memory
, gpa
, MEMTXATTRS_UNSPECIFIED
,