/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
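
/*
 * gpte_to_gfn() and gpte_to_gfn_pde() below are static functions, so
 * give them per-PTTYPE names; otherwise the two compilations of this
 * file would collide in the same translation unit.
 */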
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
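
/*
 * Atomically update a guest pte in place (used to set the accessed and
 * dirty bits).  Returns true if the pte was changed underneath us, in
 * which case the caller restarts the walk.
 */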
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(kvm, table_gfn);
	up_read(&current->mm->mmap_sem);

	table = kmap_atomic(page, KM_USER0);

	ret = CMPXCHG(&table[index], orig_pte, new_pte);

	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
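
/*
 * Compute the ACC_* permissions allowed by a guest pte: write and user
 * come straight from the pte, and exec is masked off by the NX bit
 * when the guest has NX enabled.
 */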
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __FUNCTION__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
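
/*
 * Propagate an intercepted guest pte write into the matching shadow
 * pte, using the guest page looked up in advance by the pte-write path
 * (vcpu->arch.update_pte).
 */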
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	struct page *npage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	npage = vcpu->arch.update_pte.page;
	if (!npage)
		return;
	get_page(npage);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite,
			 struct page *page)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	unsigned access = walker->pt_access;

	if (!is_present_pte(walker->ptes[walker->level - 1]))
		return NULL;

	shadow_addr = vcpu->arch.mmu.root_hpa;
	level = vcpu->arch.mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (level == PT_PAGE_TABLE_LEVEL)
			break;
		if (is_shadow_present_pte(*shadow_ent)) {
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			continue;
		}

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			if (!is_dirty_pte(walker->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, access,
					       shadow_ent);
		if (!metaphysical) {
			int r;
			pt_element_t curr_pte;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  walker->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != walker->ptes[level - 2]) {
				kvm_release_page_clean(page);
				return NULL;
			}
		}
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
	}

	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
		     user_fault, write_fault,
		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
		     ptwrite, walker->gfn, page);

	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	struct page *page;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
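
	/*
	 * Hold slots_lock so the memslot layout cannot change while we
	 * handle the fault.
	 */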
	down_read(&vcpu->kvm->slots_lock);
	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		up_read(&vcpu->kvm->slots_lock);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, walker.gfn);
	up_read(&current->mm->mmap_sem);

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt, page);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (shadow_pte && is_io_pte(*shadow_pte)) {
		spin_unlock(&vcpu->kvm->mmu_lock);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&vcpu->kvm->slots_lock);

	return write_pt;
}
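
/*
 * Walk the guest page tables to translate a guest virtual address to a
 * guest physical address; returns UNMAPPED_GVA if the address is not
 * mapped.
 */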
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
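
/*
 * Initialize a freshly shadowed page table: entries whose guest pte is
 * present must trap into kvm so the mapping can be built, while entries
 * whose guest pte is absent can reflect the fault directly to the guest.
 */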
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, offset = 0, r = 0;
	pt_element_t pt;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
					  sizeof(pt_element_t));
		if (r || is_present_pte(pt))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	}
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG