/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>

#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}

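/*
 * Handle an exit caused by an instruction that could not run natively:
 * emulate it, then translate the emulation result into a RESUME_* action
 * for the main run loop.
 */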
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       vcpu->arch.last_inst);
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;

        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                return ERR_PTR(-ENOMEM);

        return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvmppc_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_PPC_SEGSTATE:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        kvmppc_create_vcpu_debugfs(vcpu, id);
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

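/*
 * Tasklet body for decrementer emulation: queue a decrementer exception
 * for the guest, then wake the vcpu in case it is sleeping on its wait
 * queue so the timer interrupt is delivered promptly.
 */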
static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

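/*
 * Wire up decrementer emulation: the hrtimer callback runs in hardirq
 * context and only schedules the tasklet (kvmppc_decrementer_wakeup above),
 * which does the actual exception delivery in kvmppc_decrementer_func.
 */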
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

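/*
 * Completion helpers: when an emulated DCR or MMIO load exits to userspace,
 * the target register index is parked in vcpu->arch.io_gpr. On the next
 * KVM_RUN, the value userspace placed in the kvm_run struct is copied into
 * that GPR here.
 */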
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

        *gpr = run->dcr.data;
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

        if (run->mmio.len > sizeof(*gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 4: *gpr = *(u32 *)run->mmio.data; break;
                case 2: *gpr = *(u16 *)run->mmio.data; break;
                case 1: *gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: *gpr = *(u8 *)run->mmio.data; break;
                }
        }
}

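/*
 * Set up an MMIO load exit: record where the data should land
 * (vcpu->arch.io_gpr), describe the access in the kvm_run struct, and
 * return EMULATE_DO_MMIO so the caller bounces out to userspace, which
 * performs the access and fills in run->mmio.data.
 */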
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;

        return EMULATE_DO_MMIO;
}

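/*
 * Store-side counterpart of kvmppc_handle_load(): the value is placed in
 * run->mmio.data (byte-swapped for little-endian accesses) before exiting
 * to userspace, so no completion step is needed on the way back in.
 */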
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u32 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

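/*
 * Main dispatch for the KVM_RUN ioctl: complete any MMIO or DCR access
 * that caused the previous exit, deliver pending interrupts, then drop
 * back into guest context via __kvmppc_vcpu_run().
 */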
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        }

        kvmppc_core_deliver_interrupts(vcpu);

        local_irq_disable();
        kvm_guest_enter();
        r = __kvmppc_vcpu_run(run, vcpu);
        kvm_guest_exit();
        local_irq_enable();

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        kvmppc_core_queue_external(vcpu, irq);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        long r;

        switch (ioctl) {
        default:
                r = -EINVAL;
        }

        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}