/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970    63
#define NR_LPIDS        (LPID_RSVD + 1)
unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
long kvmppc_alloc_hpt(struct kvm *kvm)
{
        unsigned long hpt;
        unsigned long lpid;
        struct revmap_entry *rev;

        /* Allocate guest's hashed page table */
        hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
                               HPT_ORDER - PAGE_SHIFT);
        if (!hpt) {
                pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
                return -ENOMEM;
        }
        kvm->arch.hpt_virt = hpt;

        /* Allocate reverse map array */
        rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
        if (!rev) {
                pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
                goto out_freehpt;
        }
        kvm->arch.revmap = rev;

        /* Allocate the guest's logical partition ID */
        do {
                lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
                if (lpid >= NR_LPIDS) {
                        pr_err("kvm_alloc_hpt: No LPIDs free\n");
                        goto out_freeboth;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
        kvm->arch.lpid = lpid;

        pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
        return 0;

 out_freeboth:
        vfree(rev);
 out_freehpt:
        free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
        return -ENOMEM;
}
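/*
 * A note on the SDR1 value set above (worked example, not from the
 * original comments): the low bits of SDR1 hold the HTABSIZE field,
 * which the hardware reads as log2(number of PTEGs) - 11.  Each PTEG
 * is 128 bytes, so that works out to HPT_ORDER - 18.  Assuming
 * HPT_ORDER is 24 (a 16 MB hash table):
 *
 *      16 MB / 128 bytes = 2^17 PTEGs
 *      HTABSIZE = 17 - 11 = 6 = HPT_ORDER - 18
 */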
void kvmppc_free_hpt(struct kvm *kvm)
{
        clear_bit(kvm->arch.lpid, lpid_inuse);
        vfree(kvm->arch.revmap);
        free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}
/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
        return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
        return (pgsize == 0x10000) ? 0x1000 : 0;
}
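/*
 * Reading the two helpers above together (an illustration, not part of
 * the original comments): any page larger than 4k sets HPTE_V_LARGE in
 * the first dword, and the second dword carries the large-page-size
 * selector in its low bits - 0x1000 appears to select a 64k page, while
 * 0 together with HPTE_V_LARGE selects 16M.  So:
 *
 *      4k:   hpte0 = 0,            hpte1 = 0
 *      64k:  hpte0 = HPTE_V_LARGE, hpte1 = 0x1000
 *      16M:  hpte0 = HPTE_V_LARGE, hpte1 = 0
 */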
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
                     unsigned long porder)
{
        unsigned long i;
        unsigned long npages;
        unsigned long hp_v, hp_r;
        unsigned long addr, hash;
        unsigned long psize;
        unsigned long hp0, hp1;
        long ret;

        psize = 1ul << porder;
        npages = memslot->npages >> (porder - PAGE_SHIFT);

        /* VRMA can't be > 1TB */
        if (npages > 1ul << (40 - porder))
                npages = 1ul << (40 - porder);
        /* Can't use more than 1 HPTE per HPTEG */
        if (npages > HPT_NPTEG)
                npages = HPT_NPTEG;

        hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
                HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
        hp1 = hpte1_pgsize_encoding(psize) |
                HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

        for (i = 0; i < npages; ++i) {
                addr = i << porder;
                /* can't use hpt_hash since va > 64 bits */
                hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
                /*
                 * We assume that the hash table is empty and no
                 * vcpus are using it at this stage.  Since we create
                 * at most one HPTE per HPTEG, we just assume entry 7
                 * is available and use it.
                 */
                hash = (hash << 3) + 7;
                hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
                hp_r = hp1 | addr;
                ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
                if (ret != H_SUCCESS) {
                        pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
                               addr, ret);
                        break;
                }
        }
}
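/*
 * A note on the hash computed above (illustration, not from the
 * original comments): for a 1TB segment the architected hash is
 * roughly VSID ^ (VSID << 25) ^ (page index within the segment),
 * truncated to the table size.  Here the page index is simply i, so
 * with VRMA_VSID fixed the expression
 *
 *      (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK
 *
 * yields the same PTEG index without having to form the full virtual
 * address, which would not fit in 64 bits.
 */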
int kvmppc_mmu_hv_init(void)
{
        unsigned long host_lpid, rsvd_lpid;

        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return -EINVAL;

        memset(lpid_inuse, 0, sizeof(lpid_inuse));

        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                host_lpid = mfspr(SPRN_LPID);   /* POWER7 */
                rsvd_lpid = LPID_RSVD;
        } else {
                host_lpid = 0;                  /* PPC970 */
                rsvd_lpid = MAX_LPID_970;
        }

        set_bit(host_lpid, lpid_inuse);
        /* rsvd_lpid is reserved for use in partition switching */
        set_bit(rsvd_lpid, lpid_inuse);

        return 0;
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
        kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}
/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
                                  struct kvm_memory_slot *memslot,
                                  unsigned long psize)
{
        unsigned long start;
        long np, err;
        struct page *page, *hpage, *pages[1];
        unsigned long s, pgsize;
        unsigned long *physp;
        unsigned int is_io, got, pgorder;
        struct vm_area_struct *vma;
        unsigned long pfn, i, npages;

        physp = kvm->arch.slot_phys[memslot->id];
        if (!physp)
                return -EINVAL;
        if (physp[gfn - memslot->base_gfn])
                return 0;

        is_io = 0;
        got = 0;
        page = NULL;
        pgsize = psize;
        err = -EINVAL;
        start = gfn_to_hva_memslot(memslot, gfn);

        /* Instantiate and get the page we want access to */
        np = get_user_pages_fast(start, 1, 1, pages);
        if (np != 1) {
                /* Look up the vma for the page */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, start);
                if (!vma || vma->vm_start > start ||
                    start + psize > vma->vm_end ||
                    !(vma->vm_flags & VM_PFNMAP))
                        goto up_err;
                is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
                /* check alignment of pfn vs. requested page size */
                if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
                        goto up_err;
                up_read(&current->mm->mmap_sem);

        } else {
                page = pages[0];
                got = KVMPPC_GOT_PAGE;

                /* See if this is a large page */
                s = PAGE_SIZE;
                if (PageHuge(page)) {
                        hpage = compound_head(page);
                        s <<= compound_order(hpage);
                        /* Get the whole large page if slot alignment is ok */
                        if (s > psize && slot_is_aligned(memslot, s) &&
                            !(memslot->userspace_addr & (s - 1))) {
                                start &= ~(s - 1);
                                pgsize = s;
                                page = hpage;
                        }
                }
                if (s < psize)
                        goto out;
                pfn = page_to_pfn(page);
        }

        npages = pgsize >> PAGE_SHIFT;
        pgorder = __ilog2(npages);
        physp += (gfn - memslot->base_gfn) & ~(npages - 1);
        spin_lock(&kvm->arch.slot_phys_lock);
        for (i = 0; i < npages; ++i) {
                if (!physp[i]) {
                        physp[i] = ((pfn + i) << PAGE_SHIFT) +
                                got + is_io + pgorder;
                        got = 0;
                }
        }
        spin_unlock(&kvm->arch.slot_phys_lock);
        err = 0;

 out:
        if (got) {
                if (PageHuge(page))
                        page = compound_head(page);
                put_page(page);
        }
        return err;

 up_err:
        up_read(&current->mm->mmap_sem);
        return err;
}
/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long psize, gpa, gfn;
        struct kvm_memory_slot *memslot;
        long ret;

        if (kvm->arch.using_mmu_notifiers)
                goto do_insert;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;

        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(kvm, gfn);
        if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (!slot_is_aligned(memslot, psize))
                        return H_PARAMETER;
                if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
                        return H_PARAMETER;
        }

 do_insert:
        /* Protect linux PTE lookup from page table destruction */
        rcu_read_lock_sched();  /* this disables preemption too */
        vcpu->arch.pgdir = current->mm->pgd;
        ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
        rcu_read_unlock_sched();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
                pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
                ret = H_RESOURCE;       /* or something */
        }
        return ret;
}
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
                                                         gva_t eaddr)
{
        int i;
        u64 mask;

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
                        continue;

                if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
                        mask = ESID_MASK_1T;
                else
                        mask = ESID_MASK;

                if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
                        return &vcpu->arch.slb[i];
        }
        return NULL;
}
static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
                        unsigned long ea)
{
        unsigned long ra_mask;

        ra_mask = hpte_page_size(v, r) - 1;
        return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}
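/*
 * Worked example for the helper above (illustration only): for a 64k
 * page, hpte_page_size() returns 0x10000, so ra_mask is 0xffff.  The
 * upper bits of the guest real address then come from the RPN field of
 * the second HPTE dword and the low 16 bits come straight from the
 * effective address:
 *
 *      raddr = (r & HPTE_R_RPN & ~0xffff) | (ea & 0xffff)
 */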
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_slb *slbe;
        unsigned long slb_v;
        unsigned long pp, key;
        unsigned long v, gr;
        unsigned long *hptep;
        int index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

        /* Get the SLB entry */
        if (virtmode) {
                slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
                if (!slbe)
                        return -EINVAL;
                slb_v = slbe->origv;
        } else {
                /* real mode access */
                slb_v = vcpu->kvm->arch.vrma_slb_v;
        }

        /* Find the HPTE in the hash table */
        index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                                         HPTE_V_VALID | HPTE_V_ABSENT);
        if (index < 0)
                return -ENOENT;
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hptep[0] & ~HPTE_V_HVLOCK;
        gr = kvm->arch.revmap[index].guest_rpte;

        /* Unlock the HPTE */
        asm volatile("lwsync" : : : "memory");
        hptep[0] = v;

        gpte->eaddr = eaddr;
        gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

        /* Get PP bits and key for permission check */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        key &= slb_v;

        /* Calculate permissions */
        gpte->may_read = hpte_read_permission(pp, key);
        gpte->may_write = hpte_write_permission(pp, key);
        gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

        /* Storage key permission check for POWER7 */
        if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
                int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (amrfield & 1)
                        gpte->may_read = 0;
                if (amrfield & 2)
                        gpte->may_write = 0;
        }

        /* Get the guest physical address */
        gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
        return 0;
}
/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
        unsigned int mask;

        mask = 0x10000000;
        if ((instr & 0xfc000000) == 0x7c000000)
                mask = 0x100;           /* major opcode 31 */
        return (instr & mask) != 0;
}
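/*
 * Example of the rule above (illustration only): for D-form loads and
 * stores the bit tested is 0x10000000, i.e. bit 2 of the major opcode,
 * which is clear for loads and set for the matching stores - lwz is
 * opcode 32 (0b100000) while stw is 36 (0b100100).  For major opcode
 * 31 (X-form, e.g. lwzx/stwx) the distinguishing bit sits in the
 * extended opcode instead, hence the 0x100 mask.
 */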
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned long gpa, int is_store)
{
        int ret;
        u32 last_inst;
        unsigned long srr0 = kvmppc_get_pc(vcpu);

        /* We try to load the last instruction.  We don't let
         * emulate_instruction do it as it doesn't check what
         * kvmppc_ld returns.
         * If we fail, we just return to the guest and try executing it again.
         */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
                ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
                if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
                        return RESUME_GUEST;
                vcpu->arch.last_inst = last_inst;
        }

        /*
         * WARNING: We do not know for sure whether the instruction we just
         * read from memory is the same that caused the fault in the first
         * place.  If the instruction we read is neither a load nor a store,
         * then it can't access memory, so we don't need to worry about
         * enforcing access permissions.  So, assuming it is a load or
         * store, we just check that its direction (load or store) is
         * consistent with the original fault, since that's what we
         * checked the access permissions against.  If there is a mismatch
         * we just return and retry the instruction.
         */
        if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
                return RESUME_GUEST;

        /*
         * Emulated accesses are emulated by looking at the hash for
         * translation once, then performing the access later.  The
         * translation could be invalidated in the meantime, at which
         * point performing the subsequent memory access on the old
         * physical address could possibly be a security hole for the
         * guest (but not the host).
         *
         * This is less of an issue for MMIO stores since they aren't
         * globally visible.  It could be an issue for MMIO loads to
         * a certain extent but we'll ignore it for now.
         */
        vcpu->arch.paddr_accessed = gpa;
        return kvmppc_emulate_mmio(run, vcpu);
}
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned long ea, unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hptep, hpte[3], r;
        unsigned long mmu_seq, psize, pte_size;
        unsigned long gfn, hva, pfn;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        struct revmap_entry *rev;
        struct page *page, *pages[1];
        long index, ret, npages;
        unsigned long is_io;
        unsigned int writing, write_ok;
        struct vm_area_struct *vma;
        unsigned long rcbits;

        /*
         * Real-mode code has already searched the HPT and found the
         * entry we're interested in.  Lock the entry and check that
         * it hasn't changed.  If it has, just return and re-execute the
         * instruction.
         */
        if (ea != vcpu->arch.pgfault_addr)
                return RESUME_GUEST;
        index = vcpu->arch.pgfault_index;
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        rev = &kvm->arch.revmap[index];
        preempt_disable();
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
        hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
        hpte[1] = hptep[1];
        hpte[2] = r = rev->guest_rpte;
        asm volatile("lwsync" : : : "memory");
        hptep[0] = hpte[0];
        preempt_enable();

        if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
            hpte[1] != vcpu->arch.pgfault_hpte[1])
                return RESUME_GUEST;

        /* Translate the logical address and get the page */
        psize = hpte_page_size(hpte[0], r);
        gfn = hpte_rpn(r, psize);
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
                                              dsisr & DSISR_ISSTORE);
        }

        if (!kvm->arch.using_mmu_notifiers)
                return -EFAULT;         /* should never get here */

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        is_io = 0;
        pfn = 0;
        page = NULL;
        pte_size = PAGE_SIZE;
        writing = (dsisr & DSISR_ISSTORE) != 0;
        /* If writing != 0, then the HPTE must allow writing, if we get here */
        write_ok = writing;
        hva = gfn_to_hva_memslot(memslot, gfn);
        npages = get_user_pages_fast(hva, 1, writing, pages);
        if (npages < 1) {
                /* Check if it's an I/O mapping */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, hva);
                if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        pfn = vma->vm_pgoff +
                                ((hva - vma->vm_start) >> PAGE_SHIFT);
                        pte_size = psize;
                        is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                        write_ok = vma->vm_flags & VM_WRITE;
                }
                up_read(&current->mm->mmap_sem);
                if (!pfn)
                        return -EFAULT;
        } else {
                page = pages[0];
                if (PageHuge(page)) {
                        page = compound_head(page);
                        pte_size <<= compound_order(page);
                }
                /* if the guest wants write access, see if that is OK */
                if (!writing && hpte_is_writable(r)) {
                        pte_t *ptep, pte;

                        /*
                         * We need to protect against page table destruction
                         * while looking up and updating the pte.
                         */
                        rcu_read_lock_sched();
                        ptep = find_linux_pte_or_hugepte(current->mm->pgd,
                                                         hva, NULL);
                        if (ptep && pte_present(*ptep)) {
                                pte = kvmppc_read_update_linux_pte(ptep, 1);
                                if (pte_write(pte))
                                        write_ok = 1;
                        }
                        rcu_read_unlock_sched();
                }
                pfn = page_to_pfn(page);
        }

        ret = -EFAULT;
        if (psize > pte_size)
                goto out_put;

        /* Check WIMG vs. the actual page we're accessing */
        if (!hpte_cache_flags_ok(r, is_io)) {
                if (is_io)
                        goto out_put;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
        }

        /* Set the HPTE to point to pfn */
        r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
        if (hpte_is_writable(r) && !write_ok)
                r = hpte_make_readonly(r);
        ret = RESUME_GUEST;
        preempt_disable();
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
            rev->guest_rpte != hpte[2])
                /* HPTE has been changed under us; let the guest retry */
                goto out_unlock;
        hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

        rmap = &memslot->rmap[gfn - memslot->base_gfn];
        lock_rmap(rmap);

        /* Check if we might have been invalidated; let the guest retry if so */
        if (mmu_notifier_retry(vcpu, mmu_seq)) {
                unlock_rmap(rmap);
                goto out_unlock;
        }

        /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
        r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

        if (hptep[0] & HPTE_V_VALID) {
                /* HPTE was previously valid, so we need to invalidate it */
                unlock_rmap(rmap);
                hptep[0] |= HPTE_V_ABSENT;
                kvmppc_invalidate_hpte(kvm, hptep, index);
                /* don't lose previous R and C bits */
                r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
        } else {
                kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
        }

        hptep[1] = r;
        eieio();
        hptep[0] = hpte[0];
        asm volatile("ptesync" : : : "memory");
        preempt_enable();
        if (page && hpte_is_writable(r))
                SetPageDirty(page);

 out_put:
        if (page)
                put_page(page);
        return ret;

 out_unlock:
        hptep[0] &= ~HPTE_V_HVLOCK;
        preempt_enable();
        goto out_put;
}
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         unsigned long gfn))
{
        int ret;
        int retval = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long start = memslot->userspace_addr;
                unsigned long end;

                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

                        ret = handler(kvm, &memslot->rmap[gfn_offset],
                                      memslot->base_gfn + gfn_offset);
                        retval |= ret;
                }
        }

        return retval;
}
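/*
 * Usage sketch for kvm_handle_hva() (illustration only): the MMU
 * notifier entry points below translate a host virtual address into
 * the memslot that covers it and then apply a per-gfn handler to the
 * corresponding rmap entry, e.g.
 *
 *      kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 *
 * as done in kvm_unmap_hva() and kvm_set_spte_hva() further down.
 */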
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long gfn)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long h, i, j;
        unsigned long *hptep;
        unsigned long ptel, psize, rcbits;

        for (;;) {
                lock_rmap(rmapp);
                if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
                        unlock_rmap(rmapp);
                        break;
                }

                /*
                 * To avoid an ABBA deadlock with the HPTE lock bit,
                 * we can't spin on the HPTE lock while holding the
                 * rmap chain lock.
                 */
                i = *rmapp & KVMPPC_RMAP_INDEX;
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
                        while (hptep[0] & HPTE_V_HVLOCK)
                                cpu_relax();
                        continue;
                }
                j = rev[i].forw;
                if (j == i) {
                        /* chain is now empty */
                        *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                } else {
                        /* remove i from chain */
                        h = rev[i].back;
                        rev[h].forw = j;
                        rev[j].back = h;
                        rev[i].forw = rev[i].back = i;
                        *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
                }

                /* Now check and modify the HPTE */
                ptel = rev[i].guest_rpte;
                psize = hpte_page_size(hptep[0], ptel);
                if ((hptep[0] & HPTE_V_VALID) &&
                    hpte_rpn(ptel, psize) == gfn) {
                        hptep[0] |= HPTE_V_ABSENT;
                        kvmppc_invalidate_hpte(kvm, hptep, i);
                        /* Harvest R and C */
                        rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
                        *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
                        rev[i].guest_rpte = ptel | rcbits;
                }
                unlock_rmap(rmapp);
                hptep[0] &= ~HPTE_V_HVLOCK;
        }
        return 0;
}
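/*
 * A note on the structures walked above (illustration, not from the
 * original comments): each rmap word holds the index of one HPTE for
 * the guest page in its low bits, and rev[i].forw / rev[i].back link
 * all HPTEs for that page into a circular doubly-linked list, so
 * removing entry i amounts to the usual
 *
 *      rev[rev[i].back].forw = rev[i].forw;
 *      rev[rev[i].forw].back = rev[i].back;
 *
 * with the list head in *rmapp updated to point at a surviving entry.
 */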
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        if (kvm->arch.using_mmu_notifiers)
                kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
        return 0;
}
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         unsigned long gfn)
{
        if (!kvm->arch.using_mmu_notifiers)
                return 0;
        if (!(*rmapp & KVMPPC_RMAP_REFERENCED))
                return 0;
        kvm_unmap_rmapp(kvm, rmapp, gfn);
        while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmapp))
                cpu_relax();
        *rmapp &= ~KVMPPC_RMAP_REFERENCED;
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmapp);
        return 1;
}
int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        if (!kvm->arch.using_mmu_notifiers)
                return 0;
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                              unsigned long gfn)
{
        return !!(*rmapp & KVMPPC_RMAP_REFERENCED);
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        if (!kvm->arch.using_mmu_notifiers)
                return 0;
        return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        if (!kvm->arch.using_mmu_notifiers)
                return;
        kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
                            unsigned long *nb_ret)
{
        struct kvm_memory_slot *memslot;
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct page *page, *pages[1];
        int npages;
        unsigned long hva, psize, offset;
        unsigned long pa;
        unsigned long *physp;

        memslot = gfn_to_memslot(kvm, gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                return NULL;
        if (!kvm->arch.using_mmu_notifiers) {
                physp = kvm->arch.slot_phys[memslot->id];
                if (!physp)
                        return NULL;
                physp += gfn - memslot->base_gfn;
                pa = *physp;
                if (!pa) {
                        if (kvmppc_get_guest_page(kvm, gfn, memslot,
                                                  PAGE_SIZE) < 0)
                                return NULL;
                        pa = *physp;
                }
                page = pfn_to_page(pa >> PAGE_SHIFT);
        } else {
                hva = gfn_to_hva_memslot(memslot, gfn);
                npages = get_user_pages_fast(hva, 1, 1, pages);
                if (npages < 1)
                        return NULL;
                page = pages[0];
        }
        psize = PAGE_SIZE;
        if (PageHuge(page)) {
                page = compound_head(page);
                psize <<= compound_order(page);
        }
        if (!kvm->arch.using_mmu_notifiers)
                get_page(page);
        offset = gpa & (psize - 1);
        if (nb_ret)
                *nb_ret = psize - offset;
        return page_address(page) + offset;
}
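/*
 * Usage sketch (illustration only): a caller that needs temporary
 * access to guest memory would do something like
 *
 *      void *va = kvmppc_pin_guest_page(kvm, gpa, &nb);
 *      if (va) {
 *              ... access up to nb bytes at va ...
 *              kvmppc_unpin_guest_page(kvm, va);
 *      }
 *
 * where nb is the number of bytes from gpa to the end of the backing
 * page.
 */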
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
        struct page *page = virt_to_page(va);

        page = compound_head(page);
        put_page(page);
}
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        if (cpu_has_feature(CPU_FTR_ARCH_206))
                vcpu->arch.slb_nr = 32;         /* POWER7 */
        else
                vcpu->arch.slb_nr = 64;

        mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}