/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
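/*
 * For reference, mmu.c pulls this template in twice, roughly like this
 * (sketch of the existing include pattern, not new code):
 *
 *      #define PTTYPE 64
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 *      #define PTTYPE 32
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 */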
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#define CMPXCHG cmpxchg
#else
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
#define CMPXCHG cmpxchg
#else
#error Invalid PTTYPE value
#endif
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
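        /*
         * The three arrays above have one slot per level of the guest walk:
         * walk_addr() below records, for level N, the gfn of the guest page
         * table, the pte value it read and that pte's gpa in slot N - 1.
         */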
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
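/*
 * Worked example (illustrative numbers only): with 4K pages
 * (lvl == PT_PAGE_TABLE_LEVEL) a gpte of 0x12345067 masks down to
 * 0x12345000 and yields gfn 0x12345.  At lvl == PT_DIRECTORY_LEVEL the
 * low 21 bits are masked instead, so a 2M guest page yields the gfn of
 * its first 4K frame.
 */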
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
                                gfn_t table_gfn, unsigned index,
                                pt_element_t orig_pte, pt_element_t new_pte)
        page = gfn_to_page(kvm, table_gfn);

        table = kmap_atomic(page, KM_USER0);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table, KM_USER0);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
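/*
 * walk_addr() below uses this helper to set the accessed and dirty bits in
 * the guest pte atomically, e.g.:
 *
 *      FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index,
 *                          pte, pte|PT_ACCESSED_MASK);
 *
 * A "true" return means the guest changed the pte underneath us, in which
 * case the caller retries the walk.
 */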
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
        access &= ~(gpte >> PT64_NX_SHIFT);
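/*
 * Note on the bit layout this relies on: PT_WRITABLE_MASK and PT_USER_MASK
 * line up with ACC_WRITE_MASK and ACC_USER_MASK, and ACC_EXEC_MASK is bit 0,
 * so shifting the NX bit (bit 63) down by PT64_NX_SHIFT clears the exec
 * permission exactly when the guest pte has NX set.
 */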
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr,
                            int write_fault, int user_fault, int fetch_fault)
        unsigned index, pt_access, pte_access;

        trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
                                     fetch_fault);
        walker->level = vcpu->arch.mmu.root_level;
        pte = vcpu->arch.cr3;

        if (!is_long_mode(vcpu)) {
                pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!is_present_gpte(pte))

        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                pte_gpa = gfn_to_gpa(table_gfn);
                pte_gpa += index * sizeof(pt_element_t);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))

                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!is_present_gpte(pte))

                rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);

                if (write_fault && !is_writable_pte(pte))
                        if (user_fault || is_write_protection(vcpu))

                if (user_fault && !(pte & PT_USER_MASK))

                if (fetch_fault && (pte & PT64_NX_MASK))

                if (!(pte & PT_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                                                       sizeof(pte));
                        if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
                            index, pte, pte|PT_ACCESSED_MASK))
                        mark_page_dirty(vcpu->kvm, table_gfn);
                        pte |= PT_ACCESSED_MASK;
                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

                walker->ptes[walker->level - 1] = pte;

                if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
                    ((walker->level == PT_DIRECTORY_LEVEL) &&
                     (PTTYPE == 64 || is_pse(vcpu))) ||
                    ((walker->level == PT_PDPE_LEVEL) &&
                     is_long_mode(vcpu))) {
                        int lvl = walker->level;

                        walker->gfn = gpte_to_gfn_lvl(pte, lvl);
                        walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
                                        >> PAGE_SHIFT;

                        if (PTTYPE == 32 &&
                            walker->level == PT_DIRECTORY_LEVEL &&
                            is_cpuid_PSE36())
                                walker->gfn += pse36_gfn_delta(pte);

                pt_access = pte_access;
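                /*
                 * pt_access carries the permissions accumulated so far: each
                 * level can only narrow, never widen, what the final mapping
                 * allows, which is why pte_access above is computed as
                 * pt_access & FNAME(gpte_access)(vcpu, pte).
                 */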
        if (write_fault && !is_dirty_gpte(pte)) {
                trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
                                          pte|PT_DIRTY_MASK);
                mark_page_dirty(vcpu->kvm, table_gfn);
                pte |= PT_DIRTY_MASK;
                walker->ptes[walker->level - 1] = pte;

        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, pte_access, pt_access);

        walker->error_code = 0;
        walker->error_code = PFERR_PRESENT_MASK;

        if (write_fault)
                walker->error_code |= PFERR_WRITE_MASK;
        if (user_fault)
                walker->error_code |= PFERR_USER_MASK;
        if (fetch_fault && is_nx(vcpu))
                walker->error_code |= PFERR_FETCH_MASK;
        if (rsvd_fault)
                walker->error_code |= PFERR_RSVD_MASK;
        trace_kvm_mmu_walker_error(walker->error_code);
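        /*
         * For example, a guest user-mode write to a present but read-only
         * page ends up with error_code = PFERR_PRESENT_MASK |
         * PFERR_WRITE_MASK | PFERR_USER_MASK, i.e. the same error code the
         * hardware would have pushed for that fault.
         */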
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                              u64 *spte, const void *pte)
        gpte = *(const pt_element_t *)pte;
        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
                if (!is_present_gpte(gpte)) {
                        if (sp->unsync)
                                new_spte = shadow_trap_nonpresent_pte;
                        else
                                new_spte = shadow_notrap_nonpresent_pte;
                        __set_spte(spte, new_spte);

        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
        if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
        pfn = vcpu->arch.update_pte.pfn;
        if (is_error_pfn(pfn))
        if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
        /*
         * we call mmu_set_spte() with reset_host_protection = true because
         * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
         */
        mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                     is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
                     gpte_to_gfn(gpte), pfn, true, true);

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int user_fault, int write_fault, int hlevel,
                         int *ptwrite, pfn_t pfn)
        unsigned access = gw->pt_access;
        struct kvm_mmu_page *sp;
        u64 spte, *sptep = NULL;
        bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
        unsigned direct_access;
        pt_element_t curr_pte;
        struct kvm_shadow_walk_iterator iterator;

        if (!is_present_gpte(gw->ptes[gw->level - 1]))
        direct_access = gw->pt_access & gw->pte_access;
        if (!dirty)
                direct_access &= ~ACC_WRITE_MASK;
        for_each_shadow_entry(vcpu, addr, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;
                if (iterator.level == hlevel) {
                        mmu_set_spte(vcpu, sptep, access,
                                     gw->pte_access & access,
                                     user_fault, write_fault,
                                     dirty, ptwrite, level,
                                     gw->gfn, pfn, false, true);

                if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
                        struct kvm_mmu_page *child;

                        if (level != gw->level)
                        /*
                         * For the direct sp, if the guest pte's dirty bit
                         * changed from clean to dirty, it would corrupt the
                         * sp's access: writable would be allowed in a
                         * read-only sp.  So update the spte here to get a
                         * new sp with the correct access.
                         */
                        child = page_header(*sptep & PT64_BASE_ADDR_MASK);
                        if (child->role.access == direct_access)

                        mmu_page_remove_parent_pte(child, sptep);
                        __set_spte(sptep, shadow_trap_nonpresent_pte);
                        kvm_flush_remote_tlbs(vcpu->kvm);

                if (is_large_pte(*sptep)) {
                        drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
                        kvm_flush_remote_tlbs(vcpu->kvm);

                if (level <= gw->level) {
                        access = direct_access;
                        /*
                         * This is a large guest page backed by small host
                         * pages, so we set @direct (@sp->role.direct) = 1 and
                         * @table_gfn (@sp->gfn) to the base page frame for
                         * linear translations.
                         */
                        table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
                        access &= gw->pte_access;

                        table_gfn = gw->table_gfn[level - 2];

                sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                      direct, access, sptep);

                        r = kvm_read_guest_atomic(vcpu->kvm,
                                                  gw->pte_gpa[level - 2],
                                                  &curr_pte, sizeof(curr_pte));
                        if (r || curr_pte != gw->ptes[level - 2]) {
                                kvm_mmu_put_page(sp, sptep);
                                kvm_release_pfn_clean(pfn);
                spte = __pa(sp->spt)
                        | PT_PRESENT_MASK | PT_ACCESSED_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
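                /*
                 * Non-leaf shadow entries are installed with full
                 * present/accessed/writable/user permissions; the guest's
                 * permissions are enforced only at the leaf spte, which
                 * mmu_set_spte() above fills in from the walker's
                 * pte_access.
                 */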
/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
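/*
 * (For context: the caller, kvm_mmu_page_fault() in mmu.c, typically feeds a
 * return value of 1 into the instruction emulator and propagates negative
 * values as errors.)
 */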
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                             u32 error_code)
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        int fetch_fault = error_code & PFERR_FETCH_MASK;
        struct guest_walker walker;
        int level = PT_PAGE_TABLE_LEVEL;
        unsigned long mmu_seq;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");

        r = mmu_topup_memory_caches(vcpu);
        /*
         * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);
        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
                pgprintk("%s: guest page fault\n", __func__);
                inject_page_fault(vcpu, addr, walker.error_code);
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

        if (walker.level >= PT_DIRECTORY_LEVEL) {
                level = min(walker.level, mapping_level(vcpu, walker.gfn));
                walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

        if (is_error_pfn(pfn))
                return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))
        kvm_mmu_free_some_pages(vcpu);
        sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                             level, &write_pt, pfn);

        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 sptep, *sptep, write_pt);

                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");
        spin_unlock(&vcpu->kvm->mmu_lock);

        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;

        spin_lock(&vcpu->kvm->mmu_lock);

        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;

                sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
                        shift = PAGE_SHIFT -
                                  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                        offset = sp->role.quadrant << shift;
                        pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

                        if (is_shadow_present_pte(*sptep)) {
                                if (is_large_pte(*sptep))
                                        --vcpu->kvm->stat.lpages;
                                drop_spte(vcpu->kvm, sptep,
                                          shadow_trap_nonpresent_pte);
                        } else
                                __set_spte(sptep, shadow_trap_nonpresent_pte);

                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)

        kvm_flush_remote_tlbs(vcpu->kvm);

        atomic_inc(&vcpu->kvm->arch.invlpg_counter);

        spin_unlock(&vcpu->kvm->mmu_lock);

        if (mmu_topup_memory_caches(vcpu))

        kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
                               u32 *error)
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        r = FNAME(walk_addr)(&walker, vcpu, vaddr,
                             !!(access & PFERR_WRITE_MASK),
                             !!(access & PFERR_USER_MASK),
                             !!(access & PFERR_FETCH_MASK));

                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
                *error = walker.error_code;
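        /*
         * This is the walker-backed translation used when KVM emulates guest
         * memory accesses: the caller passes the PFERR_* bits describing the
         * access, and an UNMAPPED_GVA result together with walker.error_code
         * is what gets turned into an injected guest page fault.
         */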
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp)
        pt_element_t pt[256 / sizeof(pt_element_t)];
        if (sp->role.direct
            || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
                nonpaging_prefetch_page(vcpu, sp);

        pte_gpa = gfn_to_gpa(sp->gfn);
        if (PTTYPE == 32) {
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
                pte_gpa += offset * sizeof(pt_element_t);
        for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
                pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
                for (j = 0; j < ARRAY_SIZE(pt); ++j)
                        if (r || is_present_gpte(pt[j]))
                                sp->spt[i+j] = shadow_trap_nonpresent_pte;
                        else
                                sp->spt[i+j] = shadow_notrap_nonpresent_pte;
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            bool clear_unsync)
        int i, offset, nr_present;
        bool reset_host_protection;
        offset = nr_present = 0;

        /* direct kvm_mmu_page cannot be unsync. */
        BUG_ON(sp->role.direct);
        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {

                if (!is_shadow_present_pte(sp->spt[i]))

                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
                gfn = gpte_to_gfn(gpte);
                if (gfn != sp->gfns[i] ||
                    !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {

                        if (is_present_gpte(gpte) || !clear_unsync)
                                nonpresent = shadow_trap_nonpresent_pte;
                        else
                                nonpresent = shadow_notrap_nonpresent_pte;
                        drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
                        pte_access &= ~ACC_WRITE_MASK;
                        reset_host_protection = 0;
                } else {
                        reset_host_protection = 1;
                }
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
                         reset_host_protection);
#undef PT_BASE_ADDR_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn_lvl
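/*
 * The #undefs above reset the per-PTTYPE names so that mmu.c can include
 * this template a second time with the other PTTYPE value (see the include
 * sketch at the top of the file).
 */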