/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "device_tree.h"
#include "hw/sysbus.h"
#include "hw/spapr_vio.h"
/* Debug tracing: dprintf() is a no-op unless DEBUG_KVM is defined, in
 * which case it writes to stderr. The two definitions are mutually
 * exclusive under the #ifdef. */
#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
46 #define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
48 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
52 static int cap_interrupt_unset
= false;
53 static int cap_interrupt_level
= false;
54 static int cap_segstate
;
55 static int cap_booke_sregs
;
/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
66 static QEMUTimer
*idle_timer
;
68 static void kvm_kick_env(void *env
)
73 int kvm_arch_init(KVMState
*s
)
75 cap_interrupt_unset
= kvm_check_extension(s
, KVM_CAP_PPC_UNSET_IRQ
);
76 cap_interrupt_level
= kvm_check_extension(s
, KVM_CAP_PPC_IRQ_LEVEL
);
77 cap_segstate
= kvm_check_extension(s
, KVM_CAP_PPC_SEGSTATE
);
78 cap_booke_sregs
= kvm_check_extension(s
, KVM_CAP_PPC_BOOKE_SREGS
);
80 if (!cap_interrupt_level
) {
81 fprintf(stderr
, "KVM: Couldn't find level irq capability. Expect the "
82 "VM to stall at times!\n");
88 static int kvm_arch_sync_sregs(CPUState
*cenv
)
90 struct kvm_sregs sregs
;
93 if (cenv
->excp_model
== POWERPC_EXCP_BOOKE
) {
94 /* What we're really trying to say is "if we're on BookE, we use
95 the native PVR for now". This is the only sane way to check
96 it though, so we potentially confuse users that they can run
97 BookE guests on BookS. Let's hope nobody dares enough :) */
101 fprintf(stderr
, "kvm error: missing PVR setting capability\n");
106 ret
= kvm_vcpu_ioctl(cenv
, KVM_GET_SREGS
, &sregs
);
111 sregs
.pvr
= cenv
->spr
[SPR_PVR
];
112 return kvm_vcpu_ioctl(cenv
, KVM_SET_SREGS
, &sregs
);
115 /* Set up a shared TLB array with KVM */
116 static int kvm_booke206_tlb_init(CPUState
*env
)
118 struct kvm_book3e_206_tlb_params params
= {};
119 struct kvm_config_tlb cfg
= {};
120 struct kvm_enable_cap encap
= {};
121 unsigned int entries
= 0;
124 if (!kvm_enabled() ||
125 !kvm_check_extension(env
->kvm_state
, KVM_CAP_SW_TLB
)) {
129 assert(ARRAY_SIZE(params
.tlb_sizes
) == BOOKE206_MAX_TLBN
);
131 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
132 params
.tlb_sizes
[i
] = booke206_tlb_size(env
, i
);
133 params
.tlb_ways
[i
] = booke206_tlb_ways(env
, i
);
134 entries
+= params
.tlb_sizes
[i
];
137 assert(entries
== env
->nb_tlb
);
138 assert(sizeof(struct kvm_book3e_206_tlb_entry
) == sizeof(ppcmas_tlb_t
));
140 env
->tlb_dirty
= true;
142 cfg
.array
= (uintptr_t)env
->tlb
.tlbm
;
143 cfg
.array_len
= sizeof(ppcmas_tlb_t
) * entries
;
144 cfg
.params
= (uintptr_t)¶ms
;
145 cfg
.mmu_type
= KVM_MMU_FSL_BOOKE_NOHV
;
147 encap
.cap
= KVM_CAP_SW_TLB
;
148 encap
.args
[0] = (uintptr_t)&cfg
;
150 ret
= kvm_vcpu_ioctl(env
, KVM_ENABLE_CAP
, &encap
);
152 fprintf(stderr
, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
153 __func__
, strerror(-ret
));
157 env
->kvm_sw_tlb
= true;
161 int kvm_arch_init_vcpu(CPUState
*cenv
)
165 ret
= kvm_arch_sync_sregs(cenv
);
170 idle_timer
= qemu_new_timer_ns(vm_clock
, kvm_kick_env
, cenv
);
172 /* Some targets support access to KVM's guest TLB. */
173 switch (cenv
->mmu_model
) {
174 case POWERPC_MMU_BOOKE206
:
175 ret
= kvm_booke206_tlb_init(cenv
);
184 void kvm_arch_reset_vcpu(CPUState
*env
)
188 static void kvm_sw_tlb_put(CPUState
*env
)
190 struct kvm_dirty_tlb dirty_tlb
;
191 unsigned char *bitmap
;
194 if (!env
->kvm_sw_tlb
) {
198 bitmap
= g_malloc((env
->nb_tlb
+ 7) / 8);
199 memset(bitmap
, 0xFF, (env
->nb_tlb
+ 7) / 8);
201 dirty_tlb
.bitmap
= (uintptr_t)bitmap
;
202 dirty_tlb
.num_dirty
= env
->nb_tlb
;
204 ret
= kvm_vcpu_ioctl(env
, KVM_DIRTY_TLB
, &dirty_tlb
);
206 fprintf(stderr
, "%s: KVM_DIRTY_TLB: %s\n",
207 __func__
, strerror(-ret
));
213 int kvm_arch_put_registers(CPUState
*env
, int level
)
215 struct kvm_regs regs
;
219 ret
= kvm_vcpu_ioctl(env
, KVM_GET_REGS
, ®s
);
229 regs
.srr0
= env
->spr
[SPR_SRR0
];
230 regs
.srr1
= env
->spr
[SPR_SRR1
];
232 regs
.sprg0
= env
->spr
[SPR_SPRG0
];
233 regs
.sprg1
= env
->spr
[SPR_SPRG1
];
234 regs
.sprg2
= env
->spr
[SPR_SPRG2
];
235 regs
.sprg3
= env
->spr
[SPR_SPRG3
];
236 regs
.sprg4
= env
->spr
[SPR_SPRG4
];
237 regs
.sprg5
= env
->spr
[SPR_SPRG5
];
238 regs
.sprg6
= env
->spr
[SPR_SPRG6
];
239 regs
.sprg7
= env
->spr
[SPR_SPRG7
];
241 regs
.pid
= env
->spr
[SPR_BOOKE_PID
];
243 for (i
= 0;i
< 32; i
++)
244 regs
.gpr
[i
] = env
->gpr
[i
];
246 ret
= kvm_vcpu_ioctl(env
, KVM_SET_REGS
, ®s
);
250 if (env
->tlb_dirty
) {
252 env
->tlb_dirty
= false;
258 int kvm_arch_get_registers(CPUState
*env
)
260 struct kvm_regs regs
;
261 struct kvm_sregs sregs
;
265 ret
= kvm_vcpu_ioctl(env
, KVM_GET_REGS
, ®s
);
270 for (i
= 7; i
>= 0; i
--) {
271 env
->crf
[i
] = cr
& 15;
281 env
->spr
[SPR_SRR0
] = regs
.srr0
;
282 env
->spr
[SPR_SRR1
] = regs
.srr1
;
284 env
->spr
[SPR_SPRG0
] = regs
.sprg0
;
285 env
->spr
[SPR_SPRG1
] = regs
.sprg1
;
286 env
->spr
[SPR_SPRG2
] = regs
.sprg2
;
287 env
->spr
[SPR_SPRG3
] = regs
.sprg3
;
288 env
->spr
[SPR_SPRG4
] = regs
.sprg4
;
289 env
->spr
[SPR_SPRG5
] = regs
.sprg5
;
290 env
->spr
[SPR_SPRG6
] = regs
.sprg6
;
291 env
->spr
[SPR_SPRG7
] = regs
.sprg7
;
293 env
->spr
[SPR_BOOKE_PID
] = regs
.pid
;
295 for (i
= 0;i
< 32; i
++)
296 env
->gpr
[i
] = regs
.gpr
[i
];
298 if (cap_booke_sregs
) {
299 ret
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
304 if (sregs
.u
.e
.features
& KVM_SREGS_E_BASE
) {
305 env
->spr
[SPR_BOOKE_CSRR0
] = sregs
.u
.e
.csrr0
;
306 env
->spr
[SPR_BOOKE_CSRR1
] = sregs
.u
.e
.csrr1
;
307 env
->spr
[SPR_BOOKE_ESR
] = sregs
.u
.e
.esr
;
308 env
->spr
[SPR_BOOKE_DEAR
] = sregs
.u
.e
.dear
;
309 env
->spr
[SPR_BOOKE_MCSR
] = sregs
.u
.e
.mcsr
;
310 env
->spr
[SPR_BOOKE_TSR
] = sregs
.u
.e
.tsr
;
311 env
->spr
[SPR_BOOKE_TCR
] = sregs
.u
.e
.tcr
;
312 env
->spr
[SPR_DECR
] = sregs
.u
.e
.dec
;
313 env
->spr
[SPR_TBL
] = sregs
.u
.e
.tb
& 0xffffffff;
314 env
->spr
[SPR_TBU
] = sregs
.u
.e
.tb
>> 32;
315 env
->spr
[SPR_VRSAVE
] = sregs
.u
.e
.vrsave
;
318 if (sregs
.u
.e
.features
& KVM_SREGS_E_ARCH206
) {
319 env
->spr
[SPR_BOOKE_PIR
] = sregs
.u
.e
.pir
;
320 env
->spr
[SPR_BOOKE_MCSRR0
] = sregs
.u
.e
.mcsrr0
;
321 env
->spr
[SPR_BOOKE_MCSRR1
] = sregs
.u
.e
.mcsrr1
;
322 env
->spr
[SPR_BOOKE_DECAR
] = sregs
.u
.e
.decar
;
323 env
->spr
[SPR_BOOKE_IVPR
] = sregs
.u
.e
.ivpr
;
326 if (sregs
.u
.e
.features
& KVM_SREGS_E_64
) {
327 env
->spr
[SPR_BOOKE_EPCR
] = sregs
.u
.e
.epcr
;
330 if (sregs
.u
.e
.features
& KVM_SREGS_E_SPRG8
) {
331 env
->spr
[SPR_BOOKE_SPRG8
] = sregs
.u
.e
.sprg8
;
334 if (sregs
.u
.e
.features
& KVM_SREGS_E_IVOR
) {
335 env
->spr
[SPR_BOOKE_IVOR0
] = sregs
.u
.e
.ivor_low
[0];
336 env
->spr
[SPR_BOOKE_IVOR1
] = sregs
.u
.e
.ivor_low
[1];
337 env
->spr
[SPR_BOOKE_IVOR2
] = sregs
.u
.e
.ivor_low
[2];
338 env
->spr
[SPR_BOOKE_IVOR3
] = sregs
.u
.e
.ivor_low
[3];
339 env
->spr
[SPR_BOOKE_IVOR4
] = sregs
.u
.e
.ivor_low
[4];
340 env
->spr
[SPR_BOOKE_IVOR5
] = sregs
.u
.e
.ivor_low
[5];
341 env
->spr
[SPR_BOOKE_IVOR6
] = sregs
.u
.e
.ivor_low
[6];
342 env
->spr
[SPR_BOOKE_IVOR7
] = sregs
.u
.e
.ivor_low
[7];
343 env
->spr
[SPR_BOOKE_IVOR8
] = sregs
.u
.e
.ivor_low
[8];
344 env
->spr
[SPR_BOOKE_IVOR9
] = sregs
.u
.e
.ivor_low
[9];
345 env
->spr
[SPR_BOOKE_IVOR10
] = sregs
.u
.e
.ivor_low
[10];
346 env
->spr
[SPR_BOOKE_IVOR11
] = sregs
.u
.e
.ivor_low
[11];
347 env
->spr
[SPR_BOOKE_IVOR12
] = sregs
.u
.e
.ivor_low
[12];
348 env
->spr
[SPR_BOOKE_IVOR13
] = sregs
.u
.e
.ivor_low
[13];
349 env
->spr
[SPR_BOOKE_IVOR14
] = sregs
.u
.e
.ivor_low
[14];
350 env
->spr
[SPR_BOOKE_IVOR15
] = sregs
.u
.e
.ivor_low
[15];
352 if (sregs
.u
.e
.features
& KVM_SREGS_E_SPE
) {
353 env
->spr
[SPR_BOOKE_IVOR32
] = sregs
.u
.e
.ivor_high
[0];
354 env
->spr
[SPR_BOOKE_IVOR33
] = sregs
.u
.e
.ivor_high
[1];
355 env
->spr
[SPR_BOOKE_IVOR34
] = sregs
.u
.e
.ivor_high
[2];
358 if (sregs
.u
.e
.features
& KVM_SREGS_E_PM
) {
359 env
->spr
[SPR_BOOKE_IVOR35
] = sregs
.u
.e
.ivor_high
[3];
362 if (sregs
.u
.e
.features
& KVM_SREGS_E_PC
) {
363 env
->spr
[SPR_BOOKE_IVOR36
] = sregs
.u
.e
.ivor_high
[4];
364 env
->spr
[SPR_BOOKE_IVOR37
] = sregs
.u
.e
.ivor_high
[5];
368 if (sregs
.u
.e
.features
& KVM_SREGS_E_ARCH206_MMU
) {
369 env
->spr
[SPR_BOOKE_MAS0
] = sregs
.u
.e
.mas0
;
370 env
->spr
[SPR_BOOKE_MAS1
] = sregs
.u
.e
.mas1
;
371 env
->spr
[SPR_BOOKE_MAS2
] = sregs
.u
.e
.mas2
;
372 env
->spr
[SPR_BOOKE_MAS3
] = sregs
.u
.e
.mas7_3
& 0xffffffff;
373 env
->spr
[SPR_BOOKE_MAS4
] = sregs
.u
.e
.mas4
;
374 env
->spr
[SPR_BOOKE_MAS6
] = sregs
.u
.e
.mas6
;
375 env
->spr
[SPR_BOOKE_MAS7
] = sregs
.u
.e
.mas7_3
>> 32;
376 env
->spr
[SPR_MMUCFG
] = sregs
.u
.e
.mmucfg
;
377 env
->spr
[SPR_BOOKE_TLB0CFG
] = sregs
.u
.e
.tlbcfg
[0];
378 env
->spr
[SPR_BOOKE_TLB1CFG
] = sregs
.u
.e
.tlbcfg
[1];
381 if (sregs
.u
.e
.features
& KVM_SREGS_EXP
) {
382 env
->spr
[SPR_BOOKE_EPR
] = sregs
.u
.e
.epr
;
385 if (sregs
.u
.e
.features
& KVM_SREGS_E_PD
) {
386 env
->spr
[SPR_BOOKE_EPLC
] = sregs
.u
.e
.eplc
;
387 env
->spr
[SPR_BOOKE_EPSC
] = sregs
.u
.e
.epsc
;
390 if (sregs
.u
.e
.impl_id
== KVM_SREGS_E_IMPL_FSL
) {
391 env
->spr
[SPR_E500_SVR
] = sregs
.u
.e
.impl
.fsl
.svr
;
392 env
->spr
[SPR_Exxx_MCAR
] = sregs
.u
.e
.impl
.fsl
.mcar
;
393 env
->spr
[SPR_HID0
] = sregs
.u
.e
.impl
.fsl
.hid0
;
395 if (sregs
.u
.e
.impl
.fsl
.features
& KVM_SREGS_E_FSL_PIDn
) {
396 env
->spr
[SPR_BOOKE_PID1
] = sregs
.u
.e
.impl
.fsl
.pid1
;
397 env
->spr
[SPR_BOOKE_PID2
] = sregs
.u
.e
.impl
.fsl
.pid2
;
403 ret
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
408 ppc_store_sdr1(env
, sregs
.u
.s
.sdr1
);
412 for (i
= 0; i
< 64; i
++) {
413 ppc_store_slb(env
, sregs
.u
.s
.ppc64
.slb
[i
].slbe
,
414 sregs
.u
.s
.ppc64
.slb
[i
].slbv
);
419 for (i
= 0; i
< 16; i
++) {
420 env
->sr
[i
] = sregs
.u
.s
.ppc32
.sr
[i
];
424 for (i
= 0; i
< 8; i
++) {
425 env
->DBAT
[0][i
] = sregs
.u
.s
.ppc32
.dbat
[i
] & 0xffffffff;
426 env
->DBAT
[1][i
] = sregs
.u
.s
.ppc32
.dbat
[i
] >> 32;
427 env
->IBAT
[0][i
] = sregs
.u
.s
.ppc32
.ibat
[i
] & 0xffffffff;
428 env
->IBAT
[1][i
] = sregs
.u
.s
.ppc32
.ibat
[i
] >> 32;
435 int kvmppc_set_interrupt(CPUState
*env
, int irq
, int level
)
437 unsigned virq
= level
? KVM_INTERRUPT_SET_LEVEL
: KVM_INTERRUPT_UNSET
;
439 if (irq
!= PPC_INTERRUPT_EXT
) {
443 if (!kvm_enabled() || !cap_interrupt_unset
|| !cap_interrupt_level
) {
447 kvm_vcpu_ioctl(env
, KVM_INTERRUPT
, &virq
);
/* Pick the external-interrupt input pin number matching the target
 * family being built (embedded, 64-bit, or classic 6xx). */
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
460 void kvm_arch_pre_run(CPUState
*env
, struct kvm_run
*run
)
465 /* PowerPC Qemu tracks the various core input pins (interrupt, critical
466 * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
467 if (!cap_interrupt_level
&&
468 run
->ready_for_interrupt_injection
&&
469 (env
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
470 (env
->irq_input_state
& (1<<PPC_INPUT_INT
)))
472 /* For now KVM disregards the 'irq' argument. However, in the
473 * future KVM could cache it in-kernel to avoid a heavyweight exit
474 * when reading the UIC.
476 irq
= KVM_INTERRUPT_SET
;
478 dprintf("injected interrupt %d\n", irq
);
479 r
= kvm_vcpu_ioctl(env
, KVM_INTERRUPT
, &irq
);
481 printf("cpu %d fail inject %x\n", env
->cpu_index
, irq
);
483 /* Always wake up soon in case the interrupt was level based */
484 qemu_mod_timer(idle_timer
, qemu_get_clock_ns(vm_clock
) +
485 (get_ticks_per_sec() / 50));
488 /* We don't know if there are more interrupts pending after this. However,
489 * the guest will return to userspace in the course of handling this one
490 * anyways, so we will get a chance to deliver the rest. */
493 void kvm_arch_post_run(CPUState
*env
, struct kvm_run
*run
)
497 int kvm_arch_process_async_events(CPUState
*env
)
502 static int kvmppc_handle_halt(CPUState
*env
)
504 if (!(env
->interrupt_request
& CPU_INTERRUPT_HARD
) && (msr_ee
)) {
506 env
->exception_index
= EXCP_HLT
;
512 /* map dcr access to existing qemu dcr emulation */
513 static int kvmppc_handle_dcr_read(CPUState
*env
, uint32_t dcrn
, uint32_t *data
)
515 if (ppc_dcr_read(env
->dcr_env
, dcrn
, data
) < 0)
516 fprintf(stderr
, "Read to unhandled DCR (0x%x)\n", dcrn
);
521 static int kvmppc_handle_dcr_write(CPUState
*env
, uint32_t dcrn
, uint32_t data
)
523 if (ppc_dcr_write(env
->dcr_env
, dcrn
, data
) < 0)
524 fprintf(stderr
, "Write to unhandled DCR (0x%x)\n", dcrn
);
529 int kvm_arch_handle_exit(CPUState
*env
, struct kvm_run
*run
)
533 switch (run
->exit_reason
) {
535 if (run
->dcr
.is_write
) {
536 dprintf("handle dcr write\n");
537 ret
= kvmppc_handle_dcr_write(env
, run
->dcr
.dcrn
, run
->dcr
.data
);
539 dprintf("handle dcr read\n");
540 ret
= kvmppc_handle_dcr_read(env
, run
->dcr
.dcrn
, &run
->dcr
.data
);
544 dprintf("handle halt\n");
545 ret
= kvmppc_handle_halt(env
);
547 #ifdef CONFIG_PSERIES
548 case KVM_EXIT_PAPR_HCALL
:
549 dprintf("handle PAPR hypercall\n");
550 run
->papr_hcall
.ret
= spapr_hypercall(env
, run
->papr_hcall
.nr
,
551 run
->papr_hcall
.args
);
556 fprintf(stderr
, "KVM: unknown exit reason %d\n", run
->exit_reason
);
/*
 * Scan /proc/cpuinfo for a line starting with 'field' and copy that
 * line into 'value' (at most 'len' bytes). Returns 0 when found,
 * -1 otherwise.
 * NOTE(review): strncpy may leave 'value' unterminated if the line is
 * >= len bytes; callers here pass 512-byte buffers, matching the
 * internal line buffer, so in practice the copy always fits.
 */
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}
/*
 * Return the host timebase frequency parsed from the "timebase" line
 * of /proc/cpuinfo; falls back to get_ticks_per_sec() if the line is
 * missing or malformed.
 */
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    /* Skip the ':' separator; atoi tolerates leading whitespace. */
    ns++;

    retval = atoi(ns);
    return retval;
}
612 /* Try to find a device tree node for a CPU with clock-frequency property */
613 static int kvmppc_find_cpu_dt(char *buf
, int buf_len
)
618 if ((dp
= opendir(PROC_DEVTREE_CPU
)) == NULL
) {
619 printf("Can't open directory " PROC_DEVTREE_CPU
"\n");
624 while ((dirp
= readdir(dp
)) != NULL
) {
626 snprintf(buf
, buf_len
, "%s%s/clock-frequency", PROC_DEVTREE_CPU
,
630 snprintf(buf
, buf_len
, "%s%s", PROC_DEVTREE_CPU
, dirp
->d_name
);
637 if (buf
[0] == '\0') {
638 printf("Unknown host!\n");
/*
 * Read the host CPU clock frequency from the device tree. The
 * clock-frequency property may be one or two 32-bit cells. Returns 0
 * when no CPU node is found, -1 (as uint64_t) when the property can't
 * be opened.
 */
uint64_t kvmppc_get_clockfreq(void)
{
    char buf[512];
    uint32_t tb[2];
    FILE *f;
    int len;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return 0;
    }

    /* Reserve one byte for the NUL terminator: strncat's bound does
     * not include it (the original sizeof(buf) - strlen(buf) could
     * overflow the buffer by one byte). */
    strncat(buf, "/clock-frequency", sizeof(buf) - strlen(buf) - 1);

    f = fopen(buf, "rb");
    if (!f) {
        return -1;
    }

    len = fread(tb, sizeof(tb[0]), 2, f);
    fclose(f);
    switch (len) {
    case 1:
        /* freq is only a single cell */
        return tb[0];
    case 2:
        return *(uint64_t*)tb;
    }

    return 0;
}
676 int kvmppc_get_hypercall(CPUState
*env
, uint8_t *buf
, int buf_len
)
678 uint32_t *hc
= (uint32_t*)buf
;
680 struct kvm_ppc_pvinfo pvinfo
;
682 if (kvm_check_extension(env
->kvm_state
, KVM_CAP_PPC_GET_PVINFO
) &&
683 !kvm_vm_ioctl(env
->kvm_state
, KVM_PPC_GET_PVINFO
, &pvinfo
)) {
684 memcpy(buf
, pvinfo
.hcall
, buf_len
);
690 * Fallback to always fail hypercalls:
706 void kvmppc_set_papr(CPUState
*env
)
708 struct kvm_enable_cap cap
= {};
709 struct kvm_one_reg reg
= {};
710 struct kvm_sregs sregs
= {};
713 cap
.cap
= KVM_CAP_PPC_PAPR
;
714 ret
= kvm_vcpu_ioctl(env
, KVM_ENABLE_CAP
, &cap
);
721 * XXX We set HIOR here. It really should be a qdev property of
722 * the CPU node, but we don't have CPUs converted to qdev yet.
724 * Once we have qdev CPUs, move HIOR to a qdev property and
727 reg
.id
= KVM_ONE_REG_PPC_HIOR
;
728 reg
.u
.reg64
= env
->spr
[SPR_HIOR
];
729 ret
= kvm_vcpu_ioctl(env
, KVM_SET_ONE_REG
, ®
);
734 /* Set SDR1 so kernel space finds the HTAB */
735 ret
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
740 sregs
.u
.s
.sdr1
= env
->spr
[SPR_SDR1
];
742 ret
= kvm_vcpu_ioctl(env
, KVM_SET_SREGS
, &sregs
);
750 cpu_abort(env
, "This KVM version does not support PAPR\n");
753 bool kvm_arch_stop_on_emulation_error(CPUState
*env
)
758 int kvm_arch_on_sigbus_vcpu(CPUState
*env
, int code
, void *addr
)
/* SIGBUS outside vcpu context: not handled on PPC (non-zero = unhandled). */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}