[PATCH] KVM: MMU: add audit code to check mappings, etc are correct
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "vmx.h"
#include "kvm.h"

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT32_PTE_COPY_MASK \
	(PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)

#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
	(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level) \
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
	(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level) \
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_WP_MASK;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

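/*
 * Only ptes that are both present and writable are tracked in the
 * reverse map: the rmap exists so that rmap_write_protect() can find
 * and strip the writable mappings of a gfn once that gfn is shadowed
 * as a guest page table.
 */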
static int is_rmap_pte(u64 pte)
{
	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}

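/*
 * Pre-allocated object caches.  The caches are filled with GFP_NOWAIT
 * allocations, which do not sleep; mmu_topup_memory_caches() runs at the
 * top of the page fault path (see nonpaging_page_fault()), so that pte
 * chains and rmap descriptors can later be taken from the caches without
 * risking an allocation failure in the middle of a shadow update.
 */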
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  size_t objsize, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kzalloc(objsize, GFP_NOWAIT);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   sizeof(struct kvm_pte_chain), 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   sizeof(struct kvm_rmap_desc), 1);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
{
	if (mc->nobjs < KVM_NR_MEM_OBJS)
		mc->objects[mc->nobjs++] = obj;
	else
		kfree(obj);
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
			       struct kvm_pte_chain *pc)
{
	mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
			       struct kvm_rmap_desc *rd)
{
	mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
}

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, (then page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */
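/*
 * In other words, page->private is a tagged pointer.  A sketch of the
 * decoding, mirroring what rmap_write_protect() does below:
 *
 *	if (!(page->private & 1))
 *		spte = (u64 *)page->private;		(one mapping)
 *	else
 *		desc = (struct kvm_rmap_desc *)
 *			(page->private & ~1ul);		(chain of RMAP_EXT)
 */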
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		page->private = (unsigned long)spte;
	} else if (!(page->private & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)page->private;
		desc->shadow_ptes[1] = spte;
		page->private = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
				   struct page *page,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = 0;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		page->private = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			page->private = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(vcpu, desc);
}

static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(page->private & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)page->private != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		page->private = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(vcpu, page,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

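/*
 * Remove all writable mappings of gfn.  Each pass of the loop below
 * removes the current head of the rmap and clears its writable bit, so
 * page->private drains and the loop terminates.
 */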
static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page;
	struct kvm_memory_slot *slot;
	struct kvm_rmap_desc *desc;
	u64 *spte;

	slot = gfn_to_memslot(kvm, gfn);
	BUG_ON(!slot);
	page = gfn_to_page(slot, gfn);

	while (page->private) {
		if (!(page->private & 1))
			spte = (u64 *)page->private;
		else {
			desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
			spte = desc->shadow_ptes[0];
		}
		BUG_ON(!spte);
		BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
		       page_to_pfn(page) << PAGE_SHIFT);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON(!(*spte & PT_WRITABLE_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		rmap_remove(vcpu, spte);
		kvm_arch_ops->tlb_flush(vcpu);
		*spte &= ~(u64)PT_WRITABLE_MASK;
	}
}

static int is_empty_shadow_page(hpa_t page_hpa)
{
	u64 *pos;
	u64 *end;

	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
	     pos != end; pos++)
		if (*pos != 0) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}

static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
{
	struct kvm_mmu_page *page_head = page_header(page_hpa);

	ASSERT(is_empty_shadow_page(page_hpa));
	list_del(&page_head->link);
	page_head->page_hpa = page_hpa;
	list_add(&page_head->link, &vcpu->free_pages);
	++vcpu->kvm->n_free_mmu_pages;
}

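/* An identity hash; callers reduce it modulo KVM_NUM_MMU_PAGES to pick a bucket. */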
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (list_empty(&vcpu->free_pages))
		return NULL;

	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
	list_del(&page->link);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->page_hpa));
	page->slot_bitmap = 0;
	page->global = 1;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}

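/*
 * A shadow page can be pointed to by several shadow ptes (e.g. when two
 * guest pdes map the same guest page table).  The common single-parent
 * case lives in page->parent_pte; once a second parent appears the page
 * becomes "multimapped" and the parents spill into a list of
 * kvm_pte_chain blocks.
 */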
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
				       struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(vcpu, pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
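	/*
	 * A 32-bit guest page table holds 1024 entries (PT32_PT_BITS)
	 * while a shadow page holds only 512 64-bit sptes (PT64_PT_BITS),
	 * so one guest table is shadowed by several shadow pages;
	 * role.quadrant records which fraction of the guest table this
	 * shadow page covers.
	 */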
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	if (!metaphysical)
		rmap_write_protect(vcpu, gfn);
	return page;
}

static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = __va(page->page_hpa);

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (pt[i] & PT_PRESENT_MASK)
				rmap_remove(vcpu, &pt[i]);
			pt[i] = 0;
		}
		kvm_arch_ops->tlb_flush(vcpu);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = 0;
		if (!(ent & PT_PRESENT_MASK))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
	}
}

static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(vcpu, page, parent_pte);
}

static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(vcpu, page, parent_pte);
		*parent_pte = 0;
	}
	kvm_mmu_page_unlink_children(vcpu, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(vcpu, page->page_hpa);
	} else {
		list_del(&page->link);
		list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	}
}

static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(vcpu, page);
			r = 1;
		}
	return r;
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(vcpu, gpa);

	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_memory_slot *slot;
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!slot)
		return gpa | HPA_ERR_MASK;
	page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu, gpa);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

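/*
 * With guest paging disabled, guest addresses are mapped directly through
 * a PAE-style three-level shadow (level starts at PT32E_ROOT_LEVEL).  The
 * intermediate tables have no guest counterpart, so they are allocated as
 * metaphysical pages keyed by a pseudo_gfn derived from the address being
 * mapped.
 */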
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			pte = table[index];
			if (is_present_pte(pte) && is_writeble_pte(pte))
				return 0;
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			rmap_add(vcpu, &table[index]);
			return 0;
		}

		if (table[index] == 0) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			table[index] = new_table->page_hpa | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(VALID_PAGE(root));
		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(VALID_PAGE(root));
		root &= PT64_BASE_ADDR_MASK;
		page = page_header(root);
		--page->root_count;
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		root = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, NULL)->page_hpa;
		page = page_header(root);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		root = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					NULL)->page_hpa;
		page = page_header(root);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr))
		return 1;

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++kvm_stat.tlb_flush;
	kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
	mmu_alloc_roots(vcpu);
	kvm_mmu_flush_tlb(vcpu);
	kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
}

static void mark_pagetable_nonglobal(void *shadow_pte)
{
	page_header(__pa(shadow_pte))->global = 0;
}

static inline void set_pte_common(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  int dirty,
				  u64 access_bits,
				  gfn_t gfn)
{
	hpa_t paddr;

	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	*shadow_pte |= access_bits;

	if (!(*shadow_pte & PT_GLOBAL_MASK))
		mark_pagetable_nonglobal(shadow_pte);

	if (is_error_hpa(paddr)) {
		*shadow_pte |= gaddr;
		*shadow_pte |= PT_SHADOW_IO_MARK;
		*shadow_pte &= ~PT_PRESENT_MASK;
		return;
	}

	*shadow_pte |= paddr;
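
	/*
	 * If the gfn being mapped is itself shadowed as a guest page
	 * table, force the mapping read-only so that guest writes trap
	 * and can be intercepted (presumably by the emulator's
	 * kvm_mmu_pre_write()/kvm_mmu_post_write() pair below).
	 */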
	if (access_bits & PT_WRITABLE_MASK) {
		struct kvm_mmu_page *shadow;

		shadow = kvm_mmu_lookup_page(vcpu, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(*shadow_pte)) {
				*shadow_pte &= ~PT_WRITABLE_MASK;
				kvm_arch_ops->tlb_flush(vcpu);
			}
		}
	}

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	rmap_add(vcpu, shadow_pte);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

static inline int fix_read_pf(u64 *shadow_ent)
{
	if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
	    !(*shadow_ent & PT_USER_MASK)) {
		/*
		 * If supervisor write protect is disabled, we shadow kernel
		 * pages as user pages so we can trap the write access.
		 */
		*shadow_ent |= PT_USER_MASK;
		*shadow_ent &= ~PT_WRITABLE_MASK;

		return 1;
	}

	return 0;
}

static int may_access(u64 pte, int write, int user)
{
	if (user && !(pte & PT_USER_MASK))
		return 0;
	if (write && !(pte & PT_WRITABLE_MASK))
		return 0;
	return 1;
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

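/*
 * paging_tmpl.h is a template: including it with PTTYPE set to 64 and
 * then to 32 stamps out the paging64_* and paging32_* walker variants
 * (paging64_page_fault, paging32_gva_to_gpa, ...) from a single source.
 */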
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
			      (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
			      (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	int r;

	destroy_kvm_mmu(vcpu);
	r = init_kvm_mmu(vcpu);
	if (r < 0)
		goto out;
	r = mmu_topup_memory_caches(vcpu);
out:
	return r;
}

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct kvm_mmu_page *child;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 *spte;
	u64 pte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	int level;
	int flooded = 0;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	if (gfn == vcpu->last_pt_write_gfn) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
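		/*
		 * The write is misaligned if its first and last byte do
		 * not fall within the same pte_size-aligned unit; the
		 * xor below is nonzero exactly in that case.
		 */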
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu, page);
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			page_offset &= ~PAGE_MASK;
		}
		spte = __va(page->page_hpa);
		spte += page_offset / sizeof(*spte);
		pte = *spte;
		if (is_present_pte(pte)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				rmap_remove(vcpu, spte);
			else {
				child = page_header(pte & PT64_BASE_ADDR_MASK);
				mmu_page_remove_parent_pte(vcpu, child, spte);
			}
		}
		*spte = 0;
	}
}

void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
{
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
}

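/*
 * Note: shadow pages are added at the head of active_mmu_pages when
 * allocated, so reclaiming from the tail (.prev) presumably evicts the
 * oldest pages first, in rough LRU fashion.
 */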
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu, page);
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu, page);
	}
	while (!list_empty(&vcpu->free_pages)) {
		page = list_entry(vcpu->free_pages.next,
				  struct kvm_mmu_page, link);
		list_del(&page->link);
		__free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
		page->page_hpa = INVALID_PAGE;
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
		struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];

		INIT_LIST_HEAD(&page_header->link);
		if ((page = alloc_page(GFP_KERNEL)) == NULL)
			goto error_1;
		page->private = (unsigned long)page_header;
		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
		list_add(&page_header->link, &vcpu->free_pages);
		++vcpu->kvm->n_free_mmu_pages;
	}

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
	ASSERT(list_empty(&vcpu->free_pages));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
	ASSERT(!list_empty(&vcpu->free_pages));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = __va(page->page_hpa);
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK) {
				rmap_remove(vcpu, &pt[i]);
				pt[i] &= ~PT_WRITABLE_MASK;
			}
	}
}

#ifdef AUDIT

static const char *audit_msg;

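/*
 * Shifting left then arithmetic-shifting right by 16 sign-extends bit 47
 * into bits 48-63, producing a canonical x86-64 address (this assumes 48
 * implemented virtual address bits).
 */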
static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (!(ent & PT_PRESENT_MASK))
			continue;

		va = canonicalize(va);
		if (level > 1)
			audit_mappings_page(vcpu, ent, va, level - 1);
		else {
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu, gpa);

			if ((ent & PT_PRESENT_MASK)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	int i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			struct page *page = m->phys_mem[j];

			if (!page->private)
				continue;
			if (!(page->private & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(page->private & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = __va(page->page_hpa);

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

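/*
 * Invariant: every present, writable last-level spte should be listed
 * exactly once in the reverse map, so the two counts must agree.
 */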
static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		hfn_t hfn;
		struct page *pg;

		if (page->role.metaphysical)
			continue;

		hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
			>> PAGE_SHIFT;
		pg = pfn_to_page(hfn);
		if (pg->private)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif