/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte(swapper_pg_dir, addr);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}
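
/*
 * Reverse-mapping overview, as used by the chain functions below:
 * each memslot has one rmap word per guest page.  The KVMPPC_RMAP_INDEX
 * field of that word holds the index of the head HPTE currently mapping
 * the page, KVMPPC_RMAP_PRESENT indicates that at least one HPTE is
 * chained, KVMPPC_RMAP_REFERENCED records that the page has been
 * referenced, and the field at KVMPPC_RMAP_RC_SHIFT accumulates the
 * R/C bits of HPTEs that have been torn down.  All HPTEs for a page
 * are linked into a circular doubly-linked list through the forw/back
 * fields of kvm->arch.revmap[], which has one revmap_entry per HPTE.
 */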

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                             unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.revmap[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.revmap[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                i = pte_index;
        }
        smp_wmb();
        *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                struct revmap_entry *rev,
                                unsigned long hpte_v, unsigned long hpte_r)
{
        struct revmap_entry *next, *prev;
        unsigned long gfn, ptel, head;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        unsigned long rcbits;

        rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
        ptel = rev->guest_rpte |= rcbits;
        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                return;

        rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        /* this was the only entry in the chain */
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
        unlock_rmap(rmap);
}
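
/*
 * Look up the host Linux PTE backing host virtual address hva in the
 * page tables identified by vcpu->arch.pgdir and return it, reporting
 * the backing page size via *pte_sizep.  The "writing" flag is passed
 * down so the Linux accessed/dirty state can be updated to match the
 * guest access being made.
 */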
static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
                              int writing, unsigned long *pte_sizep)
{
        pte_t *ptep;
        unsigned long ps = *pte_sizep;
        unsigned int shift;

        ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
        if (!ptep)
                return __pte(0);
        if (shift)
                *pte_sizep = 1ul << shift;
        else
                *pte_sizep = PAGE_SIZE;
        if (ps > *pte_sizep)
                return __pte(0);
        if (!pte_present(*ptep))
                return __pte(0);
        return kvmppc_read_update_linux_pte(ptep, writing);
}

static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = hpte_v;
}

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
        unsigned long is_io;
        unsigned long *rmap;
        pte_t pte;
        unsigned int writing;
        unsigned long mmu_seq;
        unsigned long rcbits;
        bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        writing = hpte_is_writable(ptel);
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* used later to detect if we might have been invalidated */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        pa = 0;
        is_io = ~0ul;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* PPC970 can't do emulated MMIO */
                if (!cpu_has_feature(CPU_FTR_ARCH_206))
                        return H_PARAMETER;
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->rmap[slot_fn];

        if (!kvm->arch.using_mmu_notifiers) {
                physp = kvm->arch.slot_phys[memslot->id];
                if (!physp)
                        return H_PARAMETER;
                physp += slot_fn;
                if (realmode)
                        physp = real_vmalloc_addr(physp);
                pa = *physp;
                if (!pa)
                        return H_TOO_HARD;
                is_io = pa & (HPTE_R_I | HPTE_R_W);
                pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
                pa &= PAGE_MASK;
        } else {
                /* Translate to host virtual address */
                hva = gfn_to_hva_memslot(memslot, gfn);

                /* Look up the Linux PTE for the backing page */
                pte_size = psize;
                pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
                if (pte_present(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_io = hpte_cache_bits(pte_val(pte));
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                }
        }
        if (pte_size < psize)
                return H_PARAMETER;
        if (pa && pte_size > psize)
                pa |= gpa & (pte_size - 1);

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;

        if (pa)
                pteh |= HPTE_V_VALID;
        else
                pteh |= HPTE_V_ABSENT;

        /* Check WIMG */
        if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }

        /* Find and lock the HPTEG slot to use */
 do_insert:
        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                        for (i = 0; i < 8; ++i) {
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                *hpte &= ~HPTE_V_HVLOCK;
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                *hpte &= ~HPTE_V_HVLOCK;
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.revmap[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev)
                rev->guest_rpte = g_ptel;

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (kvm->arch.using_mmu_notifiers &&
                    mmu_notifier_retry(vcpu, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
                                                realmode);
                        /* Only set R/C in real HPTE if already set in *rmap */
                        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
                        ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
                }
        }

        hpte[1] = ptel;

        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");

        vcpu->arch.gpr[4] = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
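
/*
 * kvm->arch.tlbie_lock serializes broadcast tlbie sequences for this
 * guest.  try_lock_tlbie() below acquires it with a load-reserve/
 * store-conditional sequence using the per-cpu lock token, and callers
 * release it by storing 0 once the tlbie/eieio/tlbsync/ptesync sequence
 * has completed.
 */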

#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}
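
/*
 * H_REMOVE: invalidate the HPTE at pte_index, subject to the AVPN and
 * ANDCOND checks requested in flags, flush the corresponding TLB entry
 * (using tlbiel when H_LOCAL is set or only one vcpu is online), and
 * return the previous HPTE contents in GPRs 4 and 5.
 */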
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        v = hpte[0] & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~HPTE_V_VALID;
                rb = compute_tlbie_rb(v, hpte[1], pte_index);
                if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
                        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                                     : : "r" (rb), "r" (kvm->arch.lpid));
                        asm volatile("ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        asm volatile("tlbiel %0" : : "r" (rb));
                        asm volatile("ptesync" : : : "memory");
                }
                /* Read PTE low word after tlbie to get final R/C values */
                remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
        }
        r = rev->guest_rpte;
        unlock_hpte(hpte, 0);

        vcpu->arch.gpr[4] = v;
        vcpu->arch.gpr[5] = r;
        return H_SUCCESS;
}
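
/*
 * H_BULK_REMOVE: process the (control, AVPN) request pairs the guest
 * passed in GPRs 4 and up, a few at a time.  Matching HPTEs are
 * invalidated with their locks held, the corresponding TLB invalidations
 * are issued as a single batch under tlbie_lock (or with tlbiel when
 * only one vcpu is online), and the final R/C bits are folded into the
 * return codes written back into the argument registers.
 */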
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, *hptes[4], tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        long int local = 0;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
                for (; i < 4; ++i) {
                        j = i * 2;
                        pte_index = args[j];
                        flags = pte_index >> 56;
                        pte_index &= ((1ul << 56) - 1);
                        req = flags >> 6;
                        flags &= 3;
                        if (req == 3) {         /* no more requests */
                                i = 4;
                                break;
                        }
                        if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
                        hp = (unsigned long *)
                                (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        break;
                                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                                        cpu_relax();
                        }
                        found = 0;
                        if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
                                        if (!(hp[0] & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
                                        if ((hp[0] & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
                                hp[0] &= ~HPTE_V_HVLOCK;
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }

                        args[j] = ((0x80 | flags) << 56) + pte_index;
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);

                        if (!(hp[0] & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
                                hp[0] = 0;
                                continue;
                        }

                        hp[0] &= ~HPTE_V_VALID;         /* leave it locked */
                        tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        ++n;
                }

                if (!n)
                        break;

                /* Now that we've collected a batch, do the tlbies */
                if (!local) {
                        while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        for (k = 0; k < n; ++k)
                                asm volatile(PPC_TLBIE(%1,%0) : :
                                             "r" (tlbrb[k]),
                                             "r" (kvm->arch.lpid));
                        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        for (k = 0; k < n; ++k)
                                asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
                        asm volatile("ptesync" : : : "memory");
                }

                /* Read PTE low words after tlbie to get final R/C values */
                for (k = 0; k < n; ++k) {
                        j = indexes[k];
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
                        remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
                        hp[0] = 0;
                }
        }

        return ret;
}
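
/*
 * H_PROTECT: update the pp0/pp, N and key bits of the HPTE at pte_index
 * and of the guest's view of it kept in the revmap entry, invalidating
 * the old translation first if the HPTE was valid.
 */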
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;

        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        v = hpte[0];
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                rb = compute_tlbie_rb(v, r, pte_index);
                hpte[0] = v & ~HPTE_V_VALID;
                if (!(flags & H_LOCAL)) {
                        while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                                     : : "r" (rb), "r" (kvm->arch.lpid));
                        asm volatile("ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        asm volatile("tlbiel %0" : : "r" (rb));
                        asm volatile("ptesync" : : : "memory");
                }
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}
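
/*
 * H_READ: return the contents of the HPTE(s) at pte_index in GPRs 4 and
 * up; with H_READ_4 a group of four consecutive entries is returned.
 * For valid entries the second doubleword returned is the guest's view
 * from the revmap entry, combined with the current hardware R and C bits.
 */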
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                v = hpte[0] & ~HPTE_V_HVLOCK;
                r = hpte[1];
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID)
                        r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
                            unsigned long pte_index)
{
        unsigned long rb;

        hptep[0] &= ~HPTE_V_VALID;
        rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                cpu_relax();
        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                     : : "r" (rb), "r" (kvm->arch.lpid));
        asm volatile("ptesync" : : : "memory");
        kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
                           unsigned long pte_index)
{
        unsigned long rb;
        unsigned char rbyte;

        rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
        rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                cpu_relax();
        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                     : : "r" (rb), "r" (kvm->arch.lpid));
        asm volatile("ptesync" : : : "memory");
        kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
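
/*
 * Base page shift for each SLB L||LP encoding, indexed by the LP bits
 * of the SLB VSID word; used below to work out the hash and AVPN for a
 * guest effective address.
 */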
static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};

long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        unsigned long *hpte;
        unsigned long mask, val;
        unsigned long v, r;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

        for (;;) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = hpte[i] & ~HPTE_V_HVLOCK;

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = hpte[i] & ~HPTE_V_HVLOCK;
                        r = hpte[i+1];

                        /*
                         * Check the HPTE again, including large page size
                         * Since we don't currently allow any MPSS (mixed
                         * page-size segment) page sizes, it is sufficient
                         * to check against the actual page size.
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        /* Unlock and move on */
                        hpte[i] = v;
                }

                /* Try the secondary hash if we haven't already */
                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ HPT_HASH_MASK;
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if not handling
 * the fault here (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
        unsigned long *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;

        /* For protection fault, expect to find a valid HPTE */
        valid = HPTE_V_VALID;
        if (status & DSISR_NOHPTE)
                valid |= HPTE_V_ABSENT;

        index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
        if (index < 0) {
                if (status & DSISR_NOHPTE)
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hpte[0] & ~HPTE_V_HVLOCK;
        r = hpte[1];
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;

        unlock_hpte(hpte, v);

        /* For not found, if the HPTE is valid by now, retry the instruction */
        if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        status &= ~DSISR_NOHPTE;        /* DSISR_NOHPTE == SRR1_ISI_NOPT */
        if (!data) {
                if (gr & (HPTE_R_N | HPTE_R_G))
                        return status | SRR1_ISI_N_OR_G;
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | SRR1_ISI_PROT;
        } else if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        }

        /* Check storage key, if applicable */
        if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return status | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;

        /* Check the storage key to see if it is possibly emulated MMIO */
        if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
            (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
             (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
                return -2;      /* MMIO emulation - load instr word */

        return -1;              /* send fault up to host kernel mode */
}