/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
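/*
 * Which of these is populated depends on which backend modules (HV
 * and/or PR) have loaded and registered themselves; kvm_arch_init_vm()
 * below picks the ops to use for each new VM.
 */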
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) ||
               v->requests;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}
/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 1;

        WARN_ON_ONCE(!irqs_disabled());

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        local_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 */
                smp_mb();

                if (vcpu->requests) {
                        /* Make sure we process requests preemptibly */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        local_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

#ifdef CONFIG_PPC64
                /* lazy EE magic */
                hard_irq_disable();
                if (lazy_irq_pending()) {
                        /* Got an interrupt in between, try again */
                        local_irq_enable();
                        local_irq_disable();
                        kvm_guest_exit();
                        continue;
                }
#endif

                kvm_guest_enter();
                break;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
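/*
 * Handle a paravirtual hypercall from the guest (ePAPR-style ABI): the
 * hypercall number arrives in r11, up to four parameters in r3-r6, the
 * status is returned in r3 and a second return value in r4.
 */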
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
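/*
 * Reject vcpu configurations the selected backend cannot actually run,
 * e.g. PAPR mode on anything other than book3s_64, or HV KVM without
 * PAPR mode.
 */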
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
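/*
 * PPC has no per-host-CPU enable/disable sequence for virtualization
 * (unlike, say, VMX on x86), so most of the hardware hooks below are
 * stubs.
 */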
int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        /*
         * if we have both HV and PR enabled, default is HV
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        return kvmppc_core_init_vm(kvm);
err_out:
        return -EINVAL;
}
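/*
 * Tear-down mirrors kvm_arch_init_vm(): free all vcpus, let the backend
 * destroy its VM state, then drop the module reference taken at init.
 */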
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;
        /* FIXME!!
         * Should some of this be a vm ioctl? Is it possible now?
         */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MMIO
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
        case KVM_CAP_PPC_RTAS:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                if (hv_enabled)
                        r = threads_per_core;
                else
                        r = 0;
                break;
        case KVM_CAP_PPC_RMA:
                r = hv_enabled;
                /* PPC970 requires an RMA */
                if (r && cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                if (hv_enabled)
                        r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                else
                        r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = num_present_cpus();
                else
                        r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return kvmppc_core_create_memslot(kvm, slot, npages);
}
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}
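/*
 * vcpu->arch.wqp initially points at the vcpu's own wait queue; the HV
 * backend may later repoint it at a shared virtual-core queue.
 */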
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                kvmppc_xics_free_icp(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
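/*
 * Copy the result of an emulated MMIO load from run->mmio.data into the
 * target register, honouring the access size, guest byte order and any
 * requested sign extension.
 */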
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}
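/*
 * Start an MMIO load. If the in-kernel io bus (e.g. an in-kernel device
 * such as the MPIC) can satisfy the access we complete it here;
 * otherwise EMULATE_DO_MMIO hands the access to userspace via vcpu->run.
 */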
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int idx, ret;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        vcpu->arch.mmio_sign_extend = 1;
        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

        return r;
}
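/*
 * Start an MMIO store: stage the value in run->mmio.data in the guest's
 * byte order, then try the in-kernel io bus before punting to userspace.
 */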
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;
        int idx, ret;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
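/*
 * Main vcpu run ioctl. Before re-entering the guest, finish whatever
 * exit userspace just handled for us: MMIO load data, DCR load data,
 * OSI or PAPR hypercall return values, or an EPR from userspace
 * interrupt delivery.
 */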
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif /* CONFIG_KVM_XICS */
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
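/*
 * Report the four-instruction hypercall sequence a guest should use:
 * a single "sc 1" on booke-hv, otherwise lis/ori to load the magic
 * value into r0 followed by a plain "sc".
 */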
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
        u32 inst_sc1 = 0x44000022;
        pvinfo->hcall[0] = inst_sc1;
        pvinfo->hcall[1] = inst_nop;
        pvinfo->hcall[2] = inst_nop;
        pvinfo->hcall[3] = inst_nop;
#else
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, r0, KVM_SC_MAGIC_R0@h
         *    ori r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;
#endif

        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

        return 0;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;

        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event->irq, irq_event->level,
                                        line_status);
        return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;
                struct kvm *kvm = filp->private_data;

                memset(&info, 0, sizeof(info));
                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case KVM_PPC_RTAS_DEFINE_TOKEN: {
                struct kvm *kvm = filp->private_data;

                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
                break;
        }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
        }
#else /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
#endif
        }

out:
        return r;
}
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;
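/*
 * Small bitmap allocator for logical partition IDs (LPIDs), shared by
 * the backends. find_first_zero_bit() plus test_and_set_bit() keeps
 * allocation safe against concurrent callers without a separate lock.
 */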
long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}