[qemu.git] / target-ppc / kvm.c
/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "sysemu.h"
#include "kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "device_tree.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}

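/* Probe the optional KVM capabilities used below; warn if level-triggered
 * interrupt injection is unavailable, since the guest may then stall on
 * missed interrupts. */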
int kvm_arch_init(KVMState *s)
{
#ifdef KVM_CAP_PPC_UNSET_IRQ
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
#endif
#ifdef KVM_CAP_PPC_IRQ_LEVEL
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
#endif
#ifdef KVM_CAP_PPC_SEGSTATE
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
#endif
#ifdef KVM_CAP_PPC_BOOKE_SREGS
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
#endif

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}

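/* Tell the kernel which PVR we are emulating by round-tripping the sregs.
 * BookE guests keep the host PVR; without the segstate capability there is
 * no way to set it at all. */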
static int kvm_arch_sync_sregs(CPUState *cenv)
{
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so we potentially confuse users that they can run
           BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

#if !defined(CONFIG_KVM_PPC_PVR)
    if (1) {
        fprintf(stderr, "kvm error: missing PVR setting capability\n");
        return -ENOSYS;
    }
#endif

    ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

#ifdef CONFIG_KVM_PPC_PVR
    sregs.pvr = cenv->spr[SPR_PVR];
#endif
    return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
}

int kvm_arch_init_vcpu(CPUState *cenv)
{
    int ret;

    ret = kvm_arch_sync_sregs(cenv);
    if (ret) {
        return ret;
    }

    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *env)
{
}

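/* Copy QEMU's CPU state into the kernel via KVM_SET_REGS: GPRs, CTR/LR/XER,
 * MSR, PC, SRR0/1, the SPRGs and the BookE PID. */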
int kvm_arch_put_registers(CPUState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    regs.ctr = env->ctr;
    regs.lr = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    if (ret < 0)
        return ret;

    return ret;
}

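/* Read the register file back from the kernel and, when the corresponding
 * capability is present, also sync the BookE special registers or the BookS
 * MMU state (SDR1, SLB, SRs, BATs) into env. */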
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++)
        env->gpr[i] = regs.gpr[i];

    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

#ifdef KVM_CAP_PPC_BOOKE_SREGS
        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
#endif
    }

    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

#ifdef KVM_CAP_PPC_SEGSTATE
        ppc_store_sdr1(env, sregs.u.s.sdr1);

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
#endif
    }

    return 0;
}

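/* Raise or lower the external interrupt pin of a vcpu via KVM_INTERRUPT;
 * this is a no-op unless the kernel supports both level-triggered and
 * unset interrupt injection. */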
int kvmppc_set_interrupt(CPUState *env, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);

    return 0;
}

#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif

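/* Runs right before entering the guest: without the level irq capability,
 * manually inject a pending external interrupt and arm idle_timer so a
 * swallowed level interrupt gets re-delivered (see the workaround note at
 * the top of this file). */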
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    /* PowerPC Qemu tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0)
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
}

void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *env)
{
    return 0;
}

static int kvmppc_handle_halt(CPUState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}

/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

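/* Dispatch the exit reasons this port knows about: DCR accesses go to QEMU's
 * DCR emulation, HLT halts the vcpu; anything else is reported as an error. */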
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

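/* Copy the /proc/cpuinfo line that starts with 'field' into 'value'.
 * Returns 0 on success, -1 if the field is not found. */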
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}

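/* Parse the host timebase frequency from /proc/cpuinfo, falling back to
 * QEMU's tick rate when the field is missing or unparsable. */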
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}

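/* Fill 'buf' with the hypercall instruction sequence advertised by the
 * kernel (KVM_PPC_GET_PVINFO); otherwise provide a fallback sequence that
 * makes every hypercall fail with -1. */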
int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t*)buf;

#ifdef KVM_CAP_PPC_GET_PVINFO
    struct kvm_ppc_pvinfo pvinfo;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);

        return 0;
    }
#endif

    /*
     * Fallback to always fail hypercalls:
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */

    hc[0] = 0x3860ffff;
    hc[1] = 0x60000000;
    hc[2] = 0x60000000;
    hc[3] = 0x60000000;

    return 0;
}

bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}