/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
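
/*
 * Guest interrupt save/restore state: with CONFIG_KVM_BOOKE_HV the
 * (C/D/MC)SRR, DEAR and ESR values live in guest-visible SPRs (GSRR0/1,
 * GDEAR, GESR); otherwise they are kept in the shared page or vcpu->arch.
 */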
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
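
/*
 * Queue or dequeue the guest decrementer interrupt according to
 * TCR[DIE] and TSR[DIS].
 */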
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);
}
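
/*
 * Walk the pending exception bitmap in priority order and deliver the
 * first exception the guest can currently take.
 */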
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
			update_timer_ints(vcpu);
		}
	}

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns !0 if a signal is pending
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		break;
	}

	return r;
}
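
/*
 * Main vcpu entry point: when CONFIG_PPC_FPU is set, FPU state is swapped
 * between the host thread and the guest around the low-level
 * __kvmppc_vcpu_run call.
 */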
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	if (kvmppc_prepare_to_enter(vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU. Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
	local_irq_enable();
	return ret;
}
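
/* Emulate the last guest instruction and map the result onto a resume code. */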
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);

	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}
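
/*
 * Build a minimal pt_regs from the current host context so that host
 * exception handlers can be re-invoked on behalf of a guest exit.
 */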
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar
 * (but not identical) way as it would be called from the low level
 * handler (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	}
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		if (kvmppc_prepare_to_enter(vcpu)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
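
/* KVM_GET_REGS/KVM_SET_REGS: copy the guest-visible register file to and from
 * the kvm_regs ioctl structure. */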
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
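
/* KVM_GET/SET_SREGS helpers for the common Book E register set
 * (KVM_SREGS_E_BASE) and the Power ISA 2.06 additions. */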
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		vcpu->arch.tsr = sregs->u.e.tsr;
		update_timer_ints(vcpu);
	}

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
}
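
/* Timer control: TCR/TSR updates may arm or clear the guest decrementer. */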
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);

	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);
	update_timer_ints(vcpu);
}
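
/* Decrementer timer callback; 'data' is the vcpu pointer. */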
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
}
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
}