target-ppc/kvm.c
/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <dirent.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "sysemu.h"
#include "kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "device_tree.h"
#include "hw/sysbus.h"
#include "hw/spapr.h"
#include "hw/spapr_vio.h"
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;
/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}
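
/* Probe the KVM capabilities this file relies on once, at accelerator init
 * time, and cache the results in the cap_* flags above. */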
int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}
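
/* Push the guest's PVR to KVM through the sregs interface so the kernel
 * emulates the right CPU model; on BookE we keep the host's native PVR. */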
static int kvm_arch_sync_sregs(CPUState *cenv)
{
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so we might confuse users into thinking they can run
           BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
}
/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(CPUState *env)
{
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    struct kvm_enable_cap encap = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(env->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    encap.cap = KVM_CAP_SW_TLB;
    encap.args[0] = (uintptr_t)&cfg;

    ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &encap);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}
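
/* Per-vcpu init: sync sregs, create the idle wakeup timer and, where the
 * MMU model supports it, set up the shared software TLB array. */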
int kvm_arch_init_vcpu(CPUState *cenv)
{
    int ret;

    ret = kvm_arch_sync_sregs(cenv);
    if (ret) {
        return ret;
    }

    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);

    /* Some targets support access to KVM's guest TLB. */
    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        ret = kvm_booke206_tlb_init(cenv);
        break;
    default:
        break;
    }

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *env)
{
}
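
/* Mark every entry of the software TLB dirty and hand the whole array back
 * to KVM with the KVM_DIRTY_TLB ioctl. */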
static void kvm_sw_tlb_put(CPUState *env)
{
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(env, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}
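
/* Copy QEMU's CPU state (GPRs, MSR, SPRs, PID) into struct kvm_regs and
 * write it to the kernel; also flush the software TLB if it is dirty. */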
int kvm_arch_put_registers(CPUState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(env);
        env->tlb_dirty = false;
    }

    return ret;
}
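
/* Read the register state back from KVM into env, including the BookE and
 * BookS sregs blocks when the corresponding capabilities are present. */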
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }

    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        ppc_store_sdr1(env, sregs.u.s.sdr1);

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }

    return 0;
}
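
/* Raise or lower the vcpu's external interrupt line through the
 * KVM_INTERRUPT ioctl, but only if the level irq capabilities exist. */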
int kvmppc_set_interrupt(CPUState *env, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);

    return 0;
}
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
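
/* Called right before entering the guest: on hosts without the level irq
 * capability, inject a pending external interrupt by hand and re-arm the
 * idle timer as a safety net against lost level-triggered interrupts. */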
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
}

void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *env)
{
    return 0;
}
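
/* The guest executed a halt: idle the vcpu if no hard interrupt is pending
 * and external interrupts are enabled (MSR.EE set). */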
static int kvmppc_handle_halt(CPUState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}
/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
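
/* Dispatch KVM exit reasons: DCR accesses, guest halt and, on pseries
 * builds, PAPR hypercalls. */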
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
#ifdef CONFIG_PSERIES
    case KVM_EXIT_PAPR_HCALL:
        dprintf("handle PAPR hypercall\n");
        run->papr_hcall.ret = spapr_hypercall(env, run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 1;
        break;
#endif
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
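
/* Look up a field in /proc/cpuinfo and copy the matching line into value. */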
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}
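
/* Parse the host timebase frequency out of /proc/cpuinfo; fall back to
 * get_ticks_per_sec() if it cannot be found. */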
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}
/* Try to find a device tree node for a CPU with clock-frequency property */
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
{
    struct dirent *dirp;
    DIR *dp;

    if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
        return -1;
    }

    buf[0] = '\0';
    while ((dirp = readdir(dp)) != NULL) {
        FILE *f;
        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
                 dirp->d_name);
        f = fopen(buf, "r");
        if (f) {
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
            fclose(f);
            break;
        }
        buf[0] = '\0';
    }
    closedir(dp);
    if (buf[0] == '\0') {
        printf("Unknown host!\n");
        return -1;
    }

    return 0;
}
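
/* Read the host CPU's clock-frequency property (one or two cells) from the
 * device tree node located by kvmppc_find_cpu_dt(). */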
uint64_t kvmppc_get_clockfreq(void)
{
    char buf[512];
    uint32_t tb[2];
    FILE *f;
    int len;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return 0;
    }

    strncat(buf, "/clock-frequency", sizeof(buf) - strlen(buf));

    f = fopen(buf, "rb");
    if (!f) {
        return -1;
    }

    len = fread(tb, sizeof(tb[0]), 2, f);
    fclose(f);
    switch (len) {
    case 1:
        /* freq is only a single cell */
        return tb[0];
    case 2:
        return *(uint64_t*)tb;
    }

    return 0;
}
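
/* Ask KVM for the host's preferred hypercall instruction sequence; if the
 * PVINFO interface is missing, fill in a stub that always returns -1. */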
int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t*)buf;

    struct kvm_ppc_pvinfo pvinfo;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);

        return 0;
    }

    /*
     * Fallback to always fail hypercalls:
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */

    hc[0] = 0x3860ffff;
    hc[1] = 0x60000000;
    hc[2] = 0x60000000;
    hc[3] = 0x60000000;

    return 0;
}
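
/* Switch a vcpu into PAPR (pseries) mode and program HIOR and SDR1 so the
 * kernel uses the right exception prefix and finds the hash table. */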
void kvmppc_set_papr(CPUState *env)
{
    struct kvm_enable_cap cap = {};
    struct kvm_one_reg reg = {};
    struct kvm_sregs sregs = {};
    int ret;

    cap.cap = KVM_CAP_PPC_PAPR;
    ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap);

    if (ret) {
        goto fail;
    }

    /*
     * XXX We set HIOR here. It really should be a qdev property of
     *     the CPU node, but we don't have CPUs converted to qdev yet.
     *
     *     Once we have qdev CPUs, move HIOR to a qdev property and
     *     remove this chunk.
     */
    reg.id = KVM_ONE_REG_PPC_HIOR;
    reg.u.reg64 = env->spr[SPR_HIOR];
    ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, &reg);
    if (ret) {
        goto fail;
    }

    /* Set SDR1 so kernel space finds the HTAB */
    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret) {
        goto fail;
    }

    sregs.u.s.sdr1 = env->spr[SPR_SDR1];

    ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
    if (ret) {
        goto fail;
    }

    return;

fail:
    cpu_abort(env, "This KVM version does not support PAPR\n");
}
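
/* Remaining kvm_arch hooks that need no PPC-specific handling. */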
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}