/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "doorbell",   VCPU_STAT(dbell_exits) },
        { "guest doorbell", VCPU_STAT(gdbell_exits) },
        { NULL }
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc,
               vcpu->arch.shared->msr);
        printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
               vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}
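
/*
 * SPE state is switched lazily: MSR[SPE] is set in the shadow MSR only
 * while the guest is actually using SPE, and the guest SPE registers are
 * saved/loaded around that toggle by the helpers below.
 */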
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
}
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE) {
                if (!(vcpu->arch.shadow_msr & MSR_SPE))
                        kvmppc_vcpu_enable_spe(vcpu);
        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                kvmppc_vcpu_disable_spe(vcpu);
        }
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
        u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
        new_msr |= MSR_GS;
#endif

        vcpu->arch.shared->msr = new_msr;

        kvmppc_mmu_msr_notify(vcpu, old_msr);
        kvmppc_vcpu_sync_spe(vcpu);
}
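
/*
 * Pending guest exceptions are tracked as a bitmap of BOOKE_IRQPRIO_*
 * priorities in vcpu->arch.pending_exceptions; the queue/dequeue helpers
 * below only set and clear those bits, and actual delivery happens later
 * in kvmppc_core_check_exceptions().
 */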
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}
static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
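
/*
 * On HV (GS-mode) hosts the guest's save/restore and fault registers live
 * in dedicated guest SPRs (GSRR0/1, GDEAR, GESR), so the accessors below
 * read and write the hardware registers directly; on non-HV hosts the same
 * state lives in vcpu->arch or the shared (magic) page.
 */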
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GSRR0, srr0);
        mtspr(SPRN_GSRR1, srr1);
#else
        vcpu->arch.shared->srr0 = srr0;
        vcpu->arch.shared->srr1 = srr1;
#endif
}
static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.csrr0 = srr0;
        vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
                vcpu->arch.dsrr0 = srr0;
                vcpu->arch.dsrr1 = srr1;
        } else {
                set_guest_csrr(vcpu, srr0, srr1);
        }
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.mcsrr0 = srr0;
        vcpu->arch.mcsrr1 = srr1;
}
static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GDEAR);
#else
        return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GDEAR, dear);
#else
        vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GESR);
#else
        return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GESR, esr);
#else
        vcpu->arch.shared->esr = esr;
#endif
}
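
/*
 * Interrupt delivery honors the paravirt "critical section" convention:
 * the guest stores r1 in the shared area's 'critical' field while inside
 * a critical section, and delivery of maskable interrupts is suppressed
 * for as long as shared->critical == r1 in supervisor mode.
 */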
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask = 0;
        bool update_esr = false, update_dear = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
        enum int_class int_class;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                allowed = allowed && !crit;
                int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                /* fall through */
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        }

        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
                        set_guest_srr(vcpu, vcpu->arch.pc,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
                        set_guest_csrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
                        set_guest_dsrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
                        set_guest_mcsrr(vcpu, vcpu->arch.pc,
                                        vcpu->arch.shared->msr);
                        break;
                }

                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
                        set_guest_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear == true)
                        set_guest_dear(vcpu, vcpu->arch.queued_dear);
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * If an interrupt is pending but masked, raise a guest doorbell
         * so that we are notified when the guest enables the relevant
         * MSR bit.
         */
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}
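
/*
 * The guest decrementer interrupt is pending only while both TCR[DIE]
 * (decrementer interrupt enable) and TSR[DIS] (decrementer interrupt
 * status) are set; mirror that condition into the exception bitmap.
 */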
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);
}
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
                        smp_mb();
                        update_timer_ints(vcpu);
                }
        }

        priority = __ffs(*pending);
        while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;

        WARN_ON_ONCE(!irqs_disabled());

        kvmppc_core_check_exceptions(vcpu);

        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                local_irq_disable();

                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
        }

        return r;
}
/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns !0 if a signal is pending and check_signal is true
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;

        WARN_ON_ONCE(!irqs_disabled());
        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        local_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        r = 1;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

                break;
        }

        return r;
}
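
/*
 * The main run loop: userspace FPU state is parked on the stack around
 * the guest run so the guest's FP registers can live in the thread
 * struct while __kvmppc_vcpu_run() executes.
 */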
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
#ifdef CONFIG_PPC_FPU
        unsigned int fpscr;
        int fpexc_mode;
        u64 fpr[32];
#endif

        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        local_irq_disable();
        if (kvmppc_prepare_to_enter(vcpu)) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                ret = -EINTR;
                goto out;
        }

        kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

        /* Restore guest FPU state to thread */
        memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
        current->thread.fpscr.val = vcpu->arch.fpscr;

        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
         * as always using the FPU. Kernel usage of FP (via
         * enable_kernel_fp()) in this thread must not occur while
         * vcpu->fpu_active is set.
         */
        vcpu->fpu_active = 1;

        kvmppc_load_guest_fp(vcpu);
#endif

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);

        vcpu->fpu_active = 0;

        /* Save guest FPU state from thread */
        memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
        vcpu->arch.fpscr = current->thread.fpscr.val;

        /* Restore userspace FPU state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;
#endif

        kvm_guest_exit();

out:
        local_irq_enable();
        return ret;
}
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* don't overwrite subtypes, just account kvm_stats */
                kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                /* Future optimization: only reload non-volatiles if
                 * they were actually modified by emulation. */
                return RESUME_GUEST_NV;

        case EMULATE_DO_DCR:
                run->exit_reason = KVM_EXIT_DCR;
                return RESUME_HOST;

        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                run->hw.hardware_exit_reason = ~0ULL << 32;
                run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        default:
                BUG();
        }
}
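
/*
 * Build a minimal pt_regs from the current host context (stack pointer,
 * link register, MSR, and a PC sampled via a local branch-and-link) so
 * that host interrupt handlers expecting a pt_regs can be called below.
 */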
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
        ulong r1, ip, msr, lr;

        asm("mr %0, 1" : "=r"(r1));
        asm("mflr %0" : "=r"(lr));
        asm("mfmsr %0" : "=r"(msr));
        asm("bl 1f; 1: mflr %0" : "=r"(ip));

        memset(regs, 0, sizeof(*regs));
        regs->gpr[1] = r1;
        regs->nip = ip;
        regs->msr = msr;
        regs->link = lr;
}
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
        struct pt_regs regs;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_fill_pt_regs(&regs);
                do_IRQ(&regs);
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_fill_pt_regs(&regs);
                timer_interrupt(&regs);
                break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_fill_pt_regs(&regs);
                doorbell_exception(&regs);
                break;
#endif
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                /* FIXME */
                break;
        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                kvmppc_fill_pt_regs(&regs);
                performance_monitor_exception(&regs);
                break;
        }
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        /* restart interrupts if they were meant for the host */
        kvmppc_restart_interrupt(vcpu, exit_nr);

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                /* For debugging, send invalid exit reason to user space */
                run->hw.hardware_exit_reason = ~1ULL << 32;
                run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_account_exit(vcpu, DBELL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_CE or MSR_ME was not
                 * set. Once we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_EE was not set. Once
                 * we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_HV_PRIV:
                r = emulation_exit(run, vcpu);
                break;
        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
                        /*
                         * Program traps generated by user-level software must
                         * be handled by the guest kernel.
                         *
                         * In GS mode, hypervisor privileged instructions trap
                         * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
                         * actual program interrupts, handled by the guest.
                         */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                r = emulation_exit(run, vcpu);
                break;
        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;
#ifdef CONFIG_SPE
        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
                if (vcpu->arch.shared->msr & MSR_SPE)
                        kvmppc_vcpu_enable_spe(vcpu);
                else
                        kvmppc_booke_queue_irqprio(vcpu,
                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
        }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                /*
                 * Guest wants SPE, but host kernel doesn't support it. Send
                 * an "unimplemented operation" program check to the guest.
                 */
                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
                r = RESUME_GUEST;
                break;

        /*
         * These really should never happen without CONFIG_SPE,
         * as we should never enable the real MSR[SPE] in the guest.
         */
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
                       __func__, exit_nr, vcpu->arch.pc);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
#endif
        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;
#ifdef CONFIG_KVM_BOOKE_HV
        case BOOKE_INTERRUPT_HV_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR)) {
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                } else {
                        /*
                         * hcall from guest userspace -- send privileged
                         * instruction program check.
                         */
                        kvmppc_core_queue_program(vcpu, ESR_PPR);
                }

                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST_NV;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                        r = RESUME_GUEST;
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                break;
#endif
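
        /*
         * The two TLB-miss cases below first consult the guest TLB; a miss
         * there is reflected back to the guest, while a hit is resolved by
         * either mapping the page into the shadow TLB or, for non-RAM
         * addresses, emulating MMIO.
         */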
        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;

                        break;
                }
#endif

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        vcpu->arch.vaddr_accessed = eaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }
        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }
        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        /*
         * To avoid clobbering exit_reason, only check for signals if we
         * aren't already exiting to userspace for some other reason.
         */
        if (!(r & RESUME_HOST)) {
                local_irq_disable();
                if (kvmppc_prepare_to_enter(vcpu)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int i;
        int r;

        vcpu->arch.pc = 0;
        vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
        kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
        vcpu->arch.shadow_pid = 1;
        vcpu->arch.shared->msr = 0;
#endif

        /* Eye-catching numbers so we know if the guest takes an interrupt
         * before it's programmed its own IVPR/IVORs. */
        vcpu->arch.ivpr = 0x55550000;
        for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
                vcpu->arch.ivor[i] = 0x7700 | i * 4;

        kvmppc_init_timing_stats(vcpu);

        r = kvmppc_core_vcpu_setup(vcpu);
        kvmppc_sanity_check(vcpu, &r);
        return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.shared->sprg4;
        regs->sprg5 = vcpu->arch.shared->sprg5;
        regs->sprg6 = vcpu->arch.shared->sprg6;
        regs->sprg7 = vcpu->arch.shared->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        kvmppc_set_pid(vcpu, regs->pid);
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.shared->sprg4 = regs->sprg4;
        vcpu->arch.shared->sprg5 = regs->sprg5;
        vcpu->arch.shared->sprg6 = regs->sprg6;
        vcpu->arch.shared->sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}
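
/*
 * The sregs interface is split into feature blocks; each helper below
 * fills in or consumes one block (KVM_SREGS_E_BASE, KVM_SREGS_E_ARCH206,
 * KVM_SREGS_E_IVOR), guarded by the corresponding feature flag.
 */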
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
        u64 tb = get_tb();

        sregs->u.e.features |= KVM_SREGS_E_BASE;

        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
        sregs->u.e.esr = get_guest_esr(vcpu);
        sregs->u.e.dear = get_guest_dear(vcpu);
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
        sregs->u.e.tb = tb;
        sregs->u.e.vrsave = vcpu->arch.vrsave;
}
static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
                return 0;

        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
        set_guest_esr(vcpu, sregs->u.e.esr);
        set_guest_dear(vcpu, sregs->u.e.dear);
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
                vcpu->arch.dec = sregs->u.e.dec;
                kvmppc_emulate_dec(vcpu);
        }

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
                vcpu->arch.tsr = sregs->u.e.tsr;
                update_timer_ints(vcpu);
        }

        return 0;
}
static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_ARCH206;

        sregs->u.e.pir = vcpu->vcpu_id;
        sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
        sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
        sregs->u.e.decar = vcpu->arch.decar;
        sregs->u.e.ivpr = vcpu->arch.ivpr;
}
static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
                return 0;

        if (sregs->u.e.pir != vcpu->vcpu_id)
                return -EINVAL;

        vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
        vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
        vcpu->arch.decar = sregs->u.e.decar;
        vcpu->arch.ivpr = sregs->u.e.ivpr;

        return 0;
}
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_IVOR;

        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

        return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        sregs->pvr = vcpu->arch.pvr;

        get_sregs_base(vcpu, sregs);
        get_sregs_arch206(vcpu, sregs);
        kvmppc_core_get_sregs(vcpu, sregs);
        return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        if (vcpu->arch.pvr != sregs->pvr)
                return -EINVAL;

        ret = set_sregs_base(vcpu, sregs);
        if (ret < 0)
                return ret;

        ret = set_sregs_arch206(vcpu, sregs);
        if (ret < 0)
                return ret;

        return kvmppc_core_set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
        vcpu->arch.tcr = new_tcr;
        update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        set_bits(tsr_bits, &vcpu->arch.tsr);
        smp_wmb();
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        clear_bits(tsr_bits, &vcpu->arch.tsr);
        update_timer_ints(vcpu);
}
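
/*
 * Timer callback for the guest decrementer: mark TSR[DIS] and kick the
 * vcpu so the new pending state is noticed before the next guest entry.
 */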
void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
        current->thread.kvm_vcpu = NULL;
}
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR must
         * be 16-bit aligned, so we need a 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
}