/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif
/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}
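
/*
 * Note: "split real mode" is the state with MSR[IR]=0 and MSR[DR]=1,
 * i.e. instruction fetches are real-mode while data accesses are
 * translated; the helper above simply tests for exactly that bit
 * pattern in the guest MSR.
 */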
static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}
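
/*
 * A quick worked example of the fixup above (the concrete constants live
 * in kvm_book3s.h and may differ between kernel versions): assuming
 * SPLIT_HACK_MASK == 0xff000000 and SPLIT_HACK_OFFS == 0xfb000000, a
 * guest fetching from pc = 0x100 in split real mode is redirected to
 * 0xfb000100, an otherwise-unused effective-address range the shadow MMU
 * can map 1:1 back onto the low physical addresses, while data accesses
 * keep going through translation. kvmppc_unfixup_split_real() masks the
 * offset back out before the guest ever observes its own PC.
 */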
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);

		kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}
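
/*
 * Only the state the real-mode entry/exit path actually touches is
 * mirrored here: the volatile GPRs r0-r13 plus CR, XER, CTR, LR and PC.
 * The remaining GPRs never leave vcpu->arch.regs, which keeps this copy,
 * done around every guest entry, cheap.
 */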
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * In guest privileged state we want to fail all TM transactions,
	 * so disable the MSR TM bit so that every tbegin. can be trapped
	 * in the host.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}
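
/*
 * Worked example for the function above: a 64-bit guest kernel running
 * with guest_msr = MSR_SF|MSR_IR|MSR_DR|MSR_FP (and no guest-owned FP)
 * keeps only MSR_SF from the guest mask, then gains
 * MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_PR|MSR_EE (plus MSR_ISF|MSR_HV on
 * Book3S-64). MSR_PR is what actually keeps the guest in problem state
 * on the host, whatever privilege level the guest believes it has.
 */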
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed by the
	 * guest without notifying the host: they are modified by
	 * unprivileged instructions like "tbegin"/"tend"/"tresume"/
	 * "tsuspend" in a PR KVM guest.
	 *
	 * It is necessary to sync here to calculate a correct shadow_msr.
	 *
	 * A privileged guest's tbegin always fails at present, so we only
	 * need to take care of problem state guests.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
		     (old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}
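
/*
 * PURR and SPURR are simply advanced by the wall-clock timebase delta
 * above: without hypervisor mode there is no way to apportion them
 * per-thread, so PR KVM charges the guest the full TB delta for both.
 */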
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}
/* Load up the math (FP/VEC/VSX) bits that are enabled in
 * kvmppc_get_msr() but not currently enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	int exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}
/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}
static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}
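
/*
 * These four handlers are wired into the generic KVM MMU-notifier hooks
 * through kvmppc_ops (see kvm_ops_pr at the bottom of this file). PR KVM
 * has no hardware-assisted page aging or dirty tracking, so everything
 * beyond unmapping degenerates to "flush the shadow PTEs and refault".
 */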
/*****************************************/

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never target guest MSR to TS=10 && PR=0,
	 * since we always fail transactions in guest privilege
	 * state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}
/* Book3s_32 CPUs always have 32 byte cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}
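
/*
 * Worked example of the patching above, assuming the usual encoding
 * INS_DCBZ == 0x7c0007ec: masking with 0xff0007ff compares the primary
 * and extended opcode while ignoring the RA/RB fields, and clearing bit
 * 0x00000008 turns the instruction into a reserved encoding. Executing
 * that now traps as a program interrupt, which kvmppc_exit_pr_progint()
 * below recognises (same mask) and emulates as a 32-byte dcbz.
 */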
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}

		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			r = RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}
/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request*/
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
	}
#endif

	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}
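
/*
 * Example of the VSX grouping above: a guest touching a VSR with only
 * MSR_VSX requested still causes msr to be widened to
 * MSR_FP | MSR_VEC | MSR_VSX, because VSX instructions operate on the
 * combined FP/VMX register file and all of it must be resident before
 * the guest may use any VSX instruction.
 */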
/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}
#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}
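
/*
 * The top byte of FSCR is the Interrupt Cause (IC) field. For example,
 * a giveup/handle cycle for TAR passes fac == FSCR_TAR_LG (the bit
 * number of FSCR_TAR, 8 on current kernels), so the guest sees
 * "facility 8" in FSCR[56:63] when the interrupt is delivered.
 */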
static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}
/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Since we disabled MSR_TM at privilege state, the mfspr instruction
	 * for a TM spr can trigger TM fac unavailable. In this case, the
	 * emulation is handled by kvmppc_emulate_fac(), which invokes
	 * kvmppc_emulate_mfspr() finally. But note the mfspr can include
	 * RT for NV registers, so it needs to restore those NV regs to
	 * reflect the update.
	 */
	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
		return RESUME_GUEST_NV;
#endif

	return RESUME_GUEST;
}
void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
		vcpu->arch.fscr = fscr;
		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		return;
	}

	vcpu->arch.fscr = fscr;
}
#endif
static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}
static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			kvmppc_core_queue_inst_storage(vcpu,
						shadow_srr1 & 0x58000000);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
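
/*
 * Return-value convention used throughout the exit handler above:
 * RESUME_GUEST re-enters the guest directly, RESUME_HOST forces an exit
 * to userspace (run->exit_reason says why), and the *_NV variants
 * additionally request that the non-volatile register state be reloaded
 * on the way back in.
 */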
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}
static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
#ifdef CONFIG_PPC_BOOK3S_64
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		/* Flush all SLB entries */
		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
		vcpu->arch.mmu.slbia(vcpu);

		for (i = 0; i < 64; i++) {
			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
			u64 rs = sregs->u.s.ppc64.slb[i].slbv;

			if (rb & SLB_ESID_V)
				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
		}
	} else
#endif
	{
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		*val = get_reg_val(id,
				vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
			set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_XER:
		vcpu->arch.xer_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
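
/*
 * These setters are reached from userspace via the standard one-reg API.
 * A minimal (hypothetical, error handling omitted) caller that pins HIOR
 * to zero would look like:
 *
 *	__u64 hior = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_HIOR,
 *		.addr = (__u64)&hior,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * which lands in kvmppc_set_one_reg_pr() above and sets hior_explicit,
 * so kvmppc_set_pvr_pr() stops choosing a default HIOR.
 */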
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
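
/*
 * Userspace drives this through the KVM_GET_DIRTY_LOG ioctl on the VM fd,
 * e.g. (hypothetical bitmap buffer of kvm_dirty_bitmap_bytes() bytes):
 *
 *	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * The pte_pflush of the whole slot is what makes the log "get and clear":
 * every mapping is torn down, so the next write faults and redirties.
 */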
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}

static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;
	/* Require flags and process table base and size to all be zero. */
	if (cfg->flags || cfg->process_table)
		return -EINVAL;
	return 0;
}

#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * PR KVM can work on POWER9 inside a guest partition
	 * running in HPT mode.  It can't work if we are using
	 * radix translation (because radix provides no way for
	 * a process to have unique translations in quadrant 3).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
		return -EIO;
	return 0;
}
static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy  = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
	.configure_mmu = kvm_configure_mmu_pr,
#endif
	.giveup_ext = kvmppc_giveup_ext,
};
int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return -ENODEV;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif