/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "device_tree.h"

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can be sure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */

static QEMUTimer *idle_timer;

static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}

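/* One-time arch init: probe the optional irq capabilities so the code below
 * knows whether it has to work around a kernel without level-triggered
 * interrupt support. */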
int kvm_arch_init(KVMState *s)
{
#ifdef KVM_CAP_PPC_UNSET_IRQ
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
#endif
#ifdef KVM_CAP_PPC_IRQ_LEVEL
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
#endif

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}

int kvm_arch_init_vcpu(CPUState *cenv)
{
    int ret;
    struct kvm_sregs sregs;

    sregs.pvr = cenv->spr[SPR_PVR];
    ret = kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);

    idle_timer = qemu_new_timer(vm_clock, kvm_kick_env, cenv);

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *env)
{
}

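/* Push the QEMU-side register state (SRRs, SPRGs, GPRs) into the kernel
 * with KVM_SET_REGS. */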
int kvm_arch_put_registers(CPUState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    for (i = 0; i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

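/* Fetch kvm_regs and kvm_sregs from the kernel and scatter them back into
 * CPUState, including MMU state (SDR1, SLB, segment registers, BATs) when
 * the kernel advertises KVM_CAP_PPC_SEGSTATE. */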
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    for (i = 0; i < 32; i++)
        env->gpr[i] = regs.gpr[i];

#ifdef KVM_CAP_PPC_SEGSTATE
    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_SEGSTATE)) {
        env->sdr1 = sregs.u.s.sdr1;

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync segment registers */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }
#endif

    return 0;
}

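/* Raise or lower the external interrupt pin of a vcpu. Anything other than
 * the external pin, or a kernel without the required capabilities, is left
 * to the generic interrupt handling. */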
int kvmppc_set_interrupt(CPUState *env, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);

    return 0;
}

#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif

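/* Runs right before KVM_RUN: when the kernel lacks level-irq support, inject
 * a pending external interrupt by hand and re-arm the wakeup timer. */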
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    /* PowerPC Qemu tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1 << PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0)
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
    return 0;
}

void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
}

void kvm_arch_process_irqchip_events(CPUState *env)
{
}

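/* Guest halted: idle the vcpu only if external interrupts are enabled and
 * none is currently pending. */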
static int kvmppc_handle_halt(CPUState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}

/* Map DCR accesses to the existing QEMU DCR emulation. */
static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

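/* Top-level KVM exit dispatcher: DCR accesses are forwarded to the QEMU DCR
 * emulation, halt exits idle the vcpu, anything else is reported as unknown. */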
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

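/* Scan /proc/cpuinfo for a line starting with 'field' and copy it into
 * 'value'; returns nonzero if the field cannot be found. */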
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}

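/* Host timebase frequency, parsed from the "timebase" line of /proc/cpuinfo,
 * falling back to get_ticks_per_sec() when it cannot be determined. */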
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}

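/* Ask the kernel for its preferred hypercall instructions (KVM_PPC_GET_PVINFO)
 * and copy them into 'buf'; without that capability, emit a stub sequence that
 * makes every hypercall fail. */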
int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t*)buf;

#ifdef KVM_CAP_PPC_GET_PVINFO
    struct kvm_ppc_pvinfo pvinfo;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);

        return 0;
    }
#endif

    /*
     * Fallback to always fail hypercalls:
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */
    hc[0] = 0x3860ffff;     /* li r3, -1 */
    hc[1] = 0x60000000;     /* nop */
    hc[2] = 0x60000000;     /* nop */
    hc[3] = 0x60000000;     /* nop */

    return 0;
}

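/* The remaining arch hooks have no PPC-specific behaviour yet. */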
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}