/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "device_tree.h"
/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;
static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}
int kvm_arch_init(KVMState *s, int smp_cpus)
{
#ifdef KVM_CAP_PPC_UNSET_IRQ
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
#endif
#ifdef KVM_CAP_PPC_IRQ_LEVEL
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
#endif

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}
int kvm_arch_init_vcpu(CPUState *cenv)
{
    int ret = 0;
    struct kvm_sregs sregs;

    /* Tell KVM which CPU we are modelling by handing it our PVR. */
    sregs.pvr = cenv->spr[SPR_PVR];
    ret = kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);

    idle_timer = qemu_new_timer(vm_clock, kvm_kick_env, cenv);

    return ret;
}
void kvm_arch_reset_vcpu(CPUState *env)
{
}
int kvm_arch_put_registers(CPUState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }
    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }
#ifdef KVM_CAP_PPC_SEGSTATE
    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_SEGSTATE)) {
        env->sdr1 = sregs.u.s.sdr1;

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync segment registers */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs: KVM hands each BAT over as a single 64-bit value, which
         * is split into the two 32-bit halves that env->DBAT/env->IBAT track. */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }
#endif

    return 0;
}
int kvmppc_set_interrupt(CPUState *env, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);

    return 0;
}
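/*
 * Hypothetical call-site sketch (not part of this file): board or interrupt
 * controller code would assert and later clear the guest's external
 * interrupt pin through this hook roughly like so.
 *
 *     kvmppc_set_interrupt(env, PPC_INTERRUPT_EXT, 1);   raise the line
 *     kvmppc_set_interrupt(env, PPC_INTERRUPT_EXT, 0);   clear the line
 */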
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    /* PowerPC Qemu tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1 << PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }
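    /*
     * Note on the delay above: this is the 20 ms mentioned at the top of the
     * file. vm_clock counts nanoseconds and get_ticks_per_sec() is 10^9, so
     * get_ticks_per_sec() / 50 = 1000000000 / 50 = 20000000 ns = 20 ms.
     */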
    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
    return 0;
}
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    return 0;
}
int kvm_arch_process_irqchip_events(CPUState *env)
{
    return 0;
}
static int kvmppc_handle_halt(CPUState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 1;
}
/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        /* Device Control Register access: forward to the DCR emulation. */
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
    }

    return ret;
}
/* Copy the /proc/cpuinfo line starting with 'field' into 'value'. */
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}
int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t*)buf;

#ifdef KVM_CAP_PPC_GET_PVINFO
    struct kvm_ppc_pvinfo pvinfo;

    /* Prefer the hypercall instruction sequence the kernel advertises. */
    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);

        return 0;
    }
#endif

    /*
     * Fallback to always fail hypercalls:
     */
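    /*
     * The body of the fallback is not preserved in this fragment. A minimal
     * sketch, assuming the conventional "return -1 and do nothing else"
     * sequence (the encodings below are a reconstruction, not taken from
     * this file):
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */
    hc[0] = 0x3860ffff;     /* li r3, -1 */
    hc[1] = 0x60000000;     /* nop */
    hc[2] = 0x60000000;     /* nop */
    hc[3] = 0x60000000;     /* nop */

    return 0;
}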
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return true;
}