/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
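
/*
 * A sketch of how the double compilation is expected to be driven (in KVM's
 * mmu.c; shown here for illustration only, assuming the usual arrangement):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */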

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
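
/*
 * Note that SHADOW_PT_INDEX expands to PT64_INDEX for both pte sizes:
 * shadow page tables always use the 64-bit pte format regardless of the
 * guest's paging mode, so shadow tables are always indexed 64-bit style.
 */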

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t *table;
	pt_element_t *ptep;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	root = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		if (!(root & PT_PRESENT_MASK))
			goto not_present;
		--walker->level;
	}
#endif
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
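
	/*
	 * Walk from the root towards the leaf pte.  inherited_ar starts out
	 * fully permissive and is narrowed by the access bits of each
	 * intermediate table entry visited along the way.
	 */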
	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (!is_present_pte(*ptep))
			goto not_present;

		if (write_fault && !is_writeble_pte(*ptep))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(*ptep & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(*ptep & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			*ptep |= PT_ACCESSED_MASK;
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			break;
		}
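
		/*
		 * A large page (4MB, or 2MB with PAE/long mode): the
		 * directory entry maps the page directly, so the gfn is the
		 * large-page base plus the address's page-table index.
		 */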
		if (walker->level == PT_DIRECTORY_LEVEL
		    && (*ptep & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

		walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->ptep = ptep;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
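
/*
 * walker->error_code is assembled from the PFERR_* bits and so matches the
 * hardware page fault error code format, allowing it to be injected into
 * the guest unchanged (see inject_page_fault() in the page fault handler).
 */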

static void FNAME(release_walker)(struct guest_walker *walker)
{
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
					struct guest_walker *walker)
{
	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}

static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t *gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = *gpte & PT_DIRTY_MASK;
	u64 spte = *shadow_pte;
	int was_rmapped = is_rmap_pte(spte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, spte, (u64)*gpte, access_bits,
		 write_fault, user_fault, gfn);

	if (write_fault && !dirty) {
		*gpte |= PT_DIRTY_MASK;
		dirty = 1;
		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
	}

	spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
	spte |= *gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		spte |= gaddr;
		spte |= PT_SHADOW_IO_MARK;
		spte &= ~PT_PRESENT_MASK;
		set_shadow_pte(shadow_pte, spte);
		return;
	}

	spte |= paddr;
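
	/*
	 * Grant write access if the guest pte chain allows it, or if we need
	 * the write to succeed for dirty tracking.  If the target gfn is
	 * itself shadowed as a guest page table, keep the mapping read-only
	 * so that guest writes to it trap and the shadow stays in sync.
	 */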
	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_arch_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped)
		rmap_add(vcpu, shadow_pte);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= *gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes)
{
	pt_element_t gpte;

	if (bytes < sizeof(pt_element_t))
		return;
	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}
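
/*
 * FNAME(update_pte) is the write-through hook for emulated guest writes to
 * shadowed page tables (presumably wired up via kvm_mmu_pte_write() in
 * mmu.c): it eagerly propagates an already-accessed, present guest pte into
 * the corresponding shadow pte.
 */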

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= *gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	/* PSE36: pde bits 13-16 supply physical address bits 32-35 */
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;
	pt_element_t *guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;
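
		/*
		 * A guest large page has no guest page table of its own, so
		 * the shadow table backing it is "metaphysical": it is keyed
		 * by the large page's base gfn plus hugepage_access, a
		 * compressed copy of the guest pde's write/user/nx bits.
		 */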
		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = *guest_ent;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (*guest_ent & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			hugepage_access >>= PT_WRITABLE_SHIFT;
			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}
	return shadow_ent;
}
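
/*
 * Note: FNAME(fetch) returns NULL only when the guest pte is not present;
 * the page fault path below dereferences its result unconditionally,
 * relying on walk_addr() having just verified the guest pte.
 */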

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		FNAME(release_walker)(&walker);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	FNAME(release_walker)(&walker);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;
->stat
.pf_fixed
;
467 kvm_mmu_audit(vcpu
, "post page fault (fixed)");
472 static gpa_t
FNAME(gva_to_gpa
)(struct kvm_vcpu
*vcpu
, gva_t vaddr
)
474 struct guest_walker walker
;
475 gpa_t gpa
= UNMAPPED_GVA
;
478 r
= FNAME(walk_addr
)(&walker
, vcpu
, vaddr
, 0, 0, 0);
481 gpa
= (gpa_t
)walker
.gfn
<< PAGE_SHIFT
;
482 gpa
|= vaddr
& ~PAGE_MASK
;
485 FNAME(release_walker
)(&walker
);
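
/*
 * FNAME(gva_to_gpa) is the mmu's address translation hook, used for example
 * when emulated instructions access guest memory; UNMAPPED_GVA signals that
 * no guest translation exists for vaddr.
 */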

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS