/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <dirent.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "sysemu.h"
#include "kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "cpus.h"
#include "device_tree.h"
#include "hw/sysbus.h"
#include "hw/spapr.h"
#include "hw/spapr_vio.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

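/* Optional host capabilities, probed once in kvm_arch_init() and
 * consulted throughout this file. */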
static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

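/* idle_timer callback: kick the vcpu thread out of KVM so it
 * re-evaluates pending interrupts. */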
static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}

int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}

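/* Tell KVM the guest's PVR so it emulates the right CPU model; on BookE
 * the host's native PVR is kept instead (see comment below). */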
static int kvm_arch_sync_sregs(CPUPPCState *cenv)
{
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it, though, so we may mislead users into thinking they can run
           BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(CPUPPCState *env)
{
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    struct kvm_enable_cap encap = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(env->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    encap.cap = KVM_CAP_SW_TLB;
    encap.args[0] = (uintptr_t)&cfg;

    ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &encap);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}

int kvm_arch_init_vcpu(CPUPPCState *cenv)
{
    int ret;

    ret = kvm_arch_sync_sregs(cenv);
    if (ret) {
        return ret;
    }

    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);

    /* Some targets support access to KVM's guest TLB. */
    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        ret = kvm_booke206_tlb_init(cenv);
        break;
    default:
        break;
    }

    return ret;
}

void kvm_arch_reset_vcpu(CPUPPCState *env)
{
}

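/* Push the complete shadow TLB to KVM by marking every entry dirty;
 * used after QEMU has modified TLB state behind KVM's back. */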
static void kvm_sw_tlb_put(CPUPPCState *env)
{
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(env, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

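/* Write QEMU's copy of the vcpu register state back into KVM. */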
int kvm_arch_put_registers(CPUPPCState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(env);
        env->tlb_dirty = false;
    }

    return ret;
}

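/* Read the vcpu register state out of KVM into env, including the
 * BookE or BookS special registers when the host exposes them. */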
int kvm_arch_get_registers(CPUPPCState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }

    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        ppc_store_sdr1(env, sregs.u.s.sdr1);

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                          sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }

    return 0;
}

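/* Forward a change on the external interrupt pin to KVM; all other
 * interrupt inputs are ignored here. */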
int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);

    return 0;
}

#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif

void kvm_arch_pre_run(CPUPPCState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1 << PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
}

void kvm_arch_post_run(CPUPPCState *env, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUPPCState *env)
{
    return env->halted;
}

static int kvmppc_handle_halt(CPUPPCState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}

/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

int kvm_arch_handle_exit(CPUPPCState *env, struct kvm_run *run)
{
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
#ifdef CONFIG_PSERIES
    case KVM_EXIT_PAPR_HCALL:
        dprintf("handle PAPR hypercall\n");
        run->papr_hcall.ret = spapr_hypercall(env, run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 1;
        break;
#endif
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

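/* Find the line starting with 'field' in /proc/cpuinfo and copy it into
 * 'value' (truncated to len bytes). Returns 0 on success, -1 otherwise. */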
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}

uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}

/* Try to find a device tree node for a CPU with clock-frequency property */
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
{
    struct dirent *dirp;
    DIR *dp;

    if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
        return -1;
    }

    buf[0] = '\0';
    while ((dirp = readdir(dp)) != NULL) {
        FILE *f;
        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
                 dirp->d_name);
        f = fopen(buf, "r");
        if (f) {
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
            fclose(f);
            break;
        }
        buf[0] = '\0';
    }
    closedir(dp);
    if (buf[0] == '\0') {
        printf("Unknown host!\n");
        return -1;
    }

    return 0;
}

/* Read a CPU node property from the host device tree that's a single
 * integer (32-bit or 64-bit). Returns -1 if the property can't be
 * found or opened, and 0 if the format isn't understood. */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
    char buf[PATH_MAX];
    union {
        uint32_t v32;
        uint64_t v64;
    } u;
    FILE *f;
    int len;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return -1;
    }

    strncat(buf, "/", sizeof(buf) - strlen(buf));
    strncat(buf, propname, sizeof(buf) - strlen(buf));

    f = fopen(buf, "rb");
    if (!f) {
        return -1;
    }

    len = fread(&u, 1, sizeof(u), f);
    fclose(f);
    switch (len) {
    case 4:
        /* property is a 32-bit quantity */
        return be32_to_cpu(u.v32);
    case 8:
        return be64_to_cpu(u.v64);
    }

    return 0;
}

uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}

uint32_t kvmppc_get_vmx(void)
{
    return kvmppc_read_int_cpu_dt("ibm,vmx");
}

uint32_t kvmppc_get_dfp(void)
{
    return kvmppc_read_int_cpu_dt("ibm,dfp");
}

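/* Copy the host kernel's preferred hypercall instruction sequence into
 * buf, or a stub that makes every hypercall fail with -1 if the host
 * doesn't provide one. */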
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t *)buf;

    struct kvm_ppc_pvinfo pvinfo;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);

        return 0;
    }

    /*
     * Fallback to always fail hypercalls:
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */

    hc[0] = 0x3860ffff;
    hc[1] = 0x60000000;
    hc[2] = 0x60000000;
    hc[3] = 0x60000000;

    return 0;
}

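/* Put the vcpu into PAPR mode: enable KVM_CAP_PPC_PAPR, set HIOR and
 * point SDR1 at the guest hash page table. Aborts if the host kernel
 * lacks PAPR support. */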
void kvmppc_set_papr(CPUPPCState *env)
{
    struct kvm_enable_cap cap = {};
    struct kvm_one_reg reg = {};
    struct kvm_sregs sregs = {};
    int ret;
    uint64_t hior = env->spr[SPR_HIOR];

    cap.cap = KVM_CAP_PPC_PAPR;
    ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap);

    if (ret) {
        goto fail;
    }

    /*
     * XXX We set HIOR here. It really should be a qdev property of
     *     the CPU node, but we don't have CPUs converted to qdev yet.
     *
     *     Once we have qdev CPUs, move HIOR to a qdev property and
     *     remove this chunk.
     */
    reg.id = KVM_REG_PPC_HIOR;
    reg.addr = (uintptr_t)&hior;
    ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, &reg);
    if (ret) {
        fprintf(stderr, "Couldn't set HIOR. You may be running an old\n"
                        "kernel with HV KVM support but no PAPR PR KVM,\n"
                        "in which case things may still work. If they\n"
                        "don't, please update your host kernel!\n");
    }

    /* Set SDR1 so kernel space finds the HTAB */
    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret) {
        goto fail;
    }

    sregs.u.s.sdr1 = env->spr[SPR_SDR1];

    ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
    if (ret) {
        goto fail;
    }

    return;

fail:
    cpu_abort(env, "This KVM version does not support PAPR\n");
}

int kvmppc_smt_threads(void)
{
    return cap_ppc_smt ? cap_ppc_smt : 1;
}

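/* Allocate and map a contiguous Real Mode Area for the guest and wire
 * it into sysmem at address 0 when the host requires one. Returns its
 * size, 0 if no RMA is needed, or -1 on error. */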
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
{
    void *rma;
    off_t size;
    int fd;
    struct kvm_allocate_rma ret;
    MemoryRegion *rma_region;

    /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
     * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
     *                      not necessary on this hardware
     * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
     *
     * FIXME: We should allow the user to force contiguous RMA
     * allocation in the cap_ppc_rma==1 case.
     */
    if (cap_ppc_rma < 2) {
        return 0;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
    if (fd < 0) {
        fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
                strerror(errno));
        return -1;
    }

    size = MIN(ret.rma_size, 256ul << 20);

    rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (rma == MAP_FAILED) {
        fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
        return -1;
    }

    rma_region = g_new(MemoryRegion, 1);
    memory_region_init_ram_ptr(rma_region, name, size, rma);
    vmstate_register_ram_global(rma_region);
    memory_region_add_subregion(sysmem, 0, rma_region);

    return size;
}

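/* Create an in-kernel TCE (guest DMA translation) table and mmap it so
 * QEMU can see the entries; returns the mapping, with the fd via pfd. */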
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
{
    struct kvm_create_spapr_tce args = {
        .liobn = liobn,
        .window_size = window_size,
    };
    long len;
    int fd;
    void *table;

    /* Must set fd to -1 so we don't try to munmap when called for
     * destroying the table, which the upper layers -will- do
     */
    *pfd = -1;
    if (!cap_spapr_tce) {
        return NULL;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
    if (fd < 0) {
        fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
                liobn);
        return NULL;
    }

    len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE) * sizeof(VIOsPAPR_RTCE);
    /* FIXME: round this up to page size */

    table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (table == MAP_FAILED) {
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
                liobn);
        close(fd);
        return NULL;
    }

    *pfd = fd;
    return table;
}

int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
{
    long len;

    if (fd < 0) {
        return -1;
    }

    len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE) * sizeof(VIOsPAPR_RTCE);
    if ((munmap(table, len) < 0) ||
        (close(fd) < 0)) {
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
                strerror(errno));
        /* Leak the table */
    }

    return 0;
}

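/* Read the host's Processor Version Register. */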
static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}

static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    if (on) {
        *word |= flags;
    } else {
        *word &= ~flags;
    }
}

const ppc_def_t *kvmppc_host_cpu_def(void)
{
    uint32_t host_pvr = mfpvr();
    const ppc_def_t *base_spec;
    ppc_def_t *spec;
    uint32_t vmx = kvmppc_get_vmx();
    uint32_t dfp = kvmppc_get_dfp();

    base_spec = ppc_find_by_pvr(host_pvr);

    spec = g_malloc0(sizeof(*spec));
    memcpy(spec, base_spec, sizeof(*spec));

    /* Now fix up the spec with information we can query from the host */

    if (vmx != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&spec->insns_flags, PPC_ALTIVEC, vmx > 0);
        alter_insns(&spec->insns_flags2, PPC2_VSX, vmx > 1);
    }
    if (dfp != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&spec->insns_flags2, PPC2_DFP, dfp);
    }

    return spec;
}

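/* Spread vcpu indices so that each guest core starts on a host
 * SMT-thread boundary, matching the numbering the kernel expects. */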
int kvmppc_fixup_cpu(CPUPPCState *env)
{
    int smt;

    /* Adjust cpu index for SMT */
    smt = kvmppc_smt_threads();
    env->cpu_index = (env->cpu_index / smp_threads) * smt
                     + (env->cpu_index % smp_threads);

    return 0;
}

bool kvm_arch_stop_on_emulation_error(CPUPPCState *env)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUPPCState *env, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}