/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
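
/*
 * The includer (arch/x86/kvm/mmu.c in this tree) is expected to
 * instantiate both variants, roughly as sketched below (the exact
 * include site may differ):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */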
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
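
/*
 * Note: walk_addr() below fills table_gfn[], ptes[] and pte_gpa[] indexed
 * by (level - 1), so index 0 describes the lowest level reached by the
 * walk and index (root_level - 1) describes the root table.
 */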

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
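
/*
 * The walker combines these per-level permissions by ANDing them together
 * as it descends (see walk_addr() below):
 *
 *	pt_access = ACC_ALL;
 *	...
 *	pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 *	...
 *	pt_access = pte_access;
 *
 * so the final pte_access reflects the most restrictive permissions seen
 * along the whole guest walk.
 */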

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))
			goto not_present;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (!is_present_gpte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif
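
		/*
		 * Set the accessed bit in the guest pte.  The update is done
		 * with cmpxchg_gpte(), which returns true if the pte changed
		 * under us; in that case the whole walk is restarted from the
		 * "walk:" label with the new contents.
		 */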
		if (!(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
		     (pte & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
		     (pte & PT_PAGE_SIZE_MASK) &&
		     is_long_mode(vcpu))) {
			int lvl = walker->level;

			walker->gfn = gpte_to_gfn_lvl(pte, lvl);
			walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
					>> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);

			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	trace_kvm_mmu_walker_error(walker->error_code);
	return 0;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_gpte(gpte))
			__set_spte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	/*
	 * We call mmu_set_spte() with reset_host_protection = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
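/*
 * fetch() walks the shadow page table in lock-step with the guest walk
 * recorded in *gw: missing intermediate shadow pages are allocated with
 * kvm_mmu_get_page(), and the final (hlevel) entry is installed with
 * mmu_set_spte().  It returns a pointer to the leaf spte, or NULL if the
 * guest pte is not present or changed while the shadow was being built.
 */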
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep = NULL;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (iterator.level == hlevel) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, level,
				     gw->gfn, pfn, false, true);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		if (level <= gw->level) {
			int delta = level - gw->level + 1;
			direct = 1;
			if (!is_dirty_gpte(gw->ptes[level - delta]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - delta]);
			/* advance table_gfn when emulating 1gb pages with 4k */
			if (delta == 0)
				table_gfn += PT_INDEX(addr, level);
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
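/*
 * The handler proceeds in three steps: walk the guest page tables with
 * walk_addr(), translate the resulting gfn to a host pfn, then build or
 * update the shadow page table under mmu_lock via fetch().
 */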
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}
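
	/*
	 * gfn_to_pfn() may sleep, so it cannot run under mmu_lock.  The mmu
	 * notifier sequence count is sampled before the translation and
	 * re-checked with mmu_notifier_retry() once mmu_lock is held, so a
	 * concurrent invalidation of this range is detected and the fault
	 * is simply retried.
	 */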
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &write_pt, pfn);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
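
/*
 * gva_to_gpa() performs a read-only guest walk (no fault flags set) and
 * returns UNMAPPED_GVA when the guest page tables do not map vaddr.
 */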
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
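
/*
 * For PTTYPE == 32 the shadow uses 64-bit sptes, so one shadow page covers
 * only part of a guest page table; sp->role.quadrant selects which slice,
 * and prefetch_page()/sync_page() below offset into the guest table by
 * (quadrant << PT64_LEVEL_BITS) entries accordingly.
 */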
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;
	bool reset_host_protection;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_gpte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			__set_spte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
			pte_access &= ~ACC_WRITE_MASK;
			reset_host_protection = 0;
		} else {
			reset_host_protection = 1;
		}
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 reset_host_protection);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG