/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
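/*
 * For reference, a minimal sketch of the including side (the actual include
 * site is in mmu.c; this is an illustration of the compile-twice pattern,
 * not a verbatim copy of that file):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */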
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
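/*
 * Typical walker lifecycle, as used later in this file (sketch only):
 *
 *	struct guest_walker w;
 *
 *	if (FNAME(walk_addr)(&w, vcpu, addr, write_fault, user_fault,
 *			     fetch_fault)) {
 *		// success: w.gfn, w.pt_access and w.pte_access describe the
 *		// translation and are consumed by FNAME(fetch)()
 *	} else {
 *		// failure: w.error_code holds the #PF error code to inject
 *		// back into the guest
 *	}
 */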
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, uninitialized_var(pte_access);
	gpa_t pte_gpa;
	bool eperm, present, rsvd_fault;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
walk:
	present = true;
	eperm = rsvd_fault = false;
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte)) {
			present = false;
			goto error;
		}
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) {
			present = false;
			break;
		}

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (!is_present_gpte(pte)) {
			present = false;
			break;
		}

		if (is_rsvd_bits_set(vcpu, pte, walker->level)) {
			rsvd_fault = true;
			break;
		}

		if (write_fault && !is_writable_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				eperm = true;

		if (user_fault && !(pte & PT_USER_MASK))
			eperm = true;

#if PTTYPE == 64
		if (fetch_fault && (pte & PT64_NX_MASK))
			eperm = true;
#endif

		if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
				is_large_pte(pte) &&
				(PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
				is_large_pte(pte) &&
				is_long_mode(vcpu))) {
			int lvl = walker->level;

			walker->gfn = gpte_to_gfn_lvl(pte, lvl);
			walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
					>> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);

			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (!present || eperm || rsvd_fault)
		goto error;

	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	walker->error_code = 0;
	if (present)
		walker->error_code |= PFERR_PRESENT_MASK;
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault && is_nx(vcpu))
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	trace_kvm_mmu_walker_error(walker->error_code);
	return 0;
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	u64 new_spte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_gpte(gpte)) {
			if (sp->unsync)
				new_spte = shadow_trap_nonpresent_pte;
			else
				new_spte = shadow_notrap_nonpresent_pte;
			__set_spte(spte, new_spte);
		}
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	/*
	 * we call mmu_set_spte() with reset_host_protection = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	int r;
	pt_element_t curr_pte;

	r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 1],
				  &curr_pte, sizeof(curr_pte));
	return r || curr_pte != gw->ptes[level - 1];
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp = NULL;
	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
	int top_level;
	unsigned direct_access;
	struct kvm_shadow_walk_iterator it;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pt_access & gw->pte_access;
	if (!dirty)
		direct_access &= ~ACC_WRITE_MASK;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
		     user_fault, write_fault, dirty, ptwrite, it.level,
		     gw->gfn, pfn, false, true);

	return it.sptep;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return NULL;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
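/*
 * Rough sketch of how the generic mmu code is expected to act on this
 * return value (the real caller is kvm_mmu_page_fault() in mmu.c; the
 * lines below are an approximation, not a copy of that code):
 *
 *	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
 *	if (r < 0)
 *		return r;	// real error, propagate it
 *	if (r)
 *		// emulate the faulting instruction
 *	else
 *		// r == 0: shadow pte fixed up, just resume the guest
 */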
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &write_pt, pfn);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			int offset, shift;

			if (!sp->unsync)
				break;

			shift = PAGE_SHIFT -
				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
			offset = sp->role.quadrant << shift;

			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				drop_spte(vcpu->kvm, sptep,
					  shadow_trap_nonpresent_pte);
				need_flush = 1;
			} else
				__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);

	atomic_inc(&vcpu->kvm->arch.invlpg_counter);

	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;

	if (mmu_topup_memory_caches(vcpu))
		return;
	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       u32 *error)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr,
			     !!(access & PFERR_WRITE_MASK),
			     !!(access & PFERR_USER_MASK),
			     !!(access & PFERR_FETCH_MASK));

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (error)
		*error = walker.error_code;

	return gpa;
}
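/*
 * Illustrative use (hypothetical caller, names approximate): translate a
 * guest virtual address before emulating a guest memory write:
 *
 *	u32 error;
 *	gpa_t gpa = FNAME(gva_to_gpa)(vcpu, vaddr,
 *				      PFERR_WRITE_MASK | PFERR_USER_MASK,
 *				      &error);
 *	if (gpa == UNMAPPED_GVA)
 *		// translation failed: inject a page fault using 'error'
 */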
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    bool clear_unsync)
{
	int i, offset, nr_present;
	bool reset_host_protection;
	gpa_t first_pte_gpa;

	offset = nr_present = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		gfn = gpte_to_gfn(gpte);
		if (is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL)
		    || gfn != sp->gfns[i] || !is_present_gpte(gpte)
		    || !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			if (is_present_gpte(gpte) || !clear_unsync)
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
			pte_access &= ~ACC_WRITE_MASK;
			reset_host_protection = 0;
		} else {
			reset_host_protection = 1;
		}
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 reset_host_protection);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG