target/ppc/kvm.c
/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <dirent.h>
#include <sys/ioctl.h>
#include <sys/vfs.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "cpu-models.h"
#include "qemu/timer.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "sysemu/cpus.h"
#include "sysemu/device_tree.h"
#include "mmu-hash64.h"

#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/hw.h"
#include "hw/ppc/ppc.h"
#include "migration/qemu-file-types.h"
#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"
#include "exec/memattrs.h"
#include "exec/ram_addr.h"
#include "sysemu/hostmem.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "elf.h"
#include "sysemu/kvm_int.h"

#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"

#define DEBUG_RETURN_GUEST 0
#define DEBUG_RETURN_GDB   1

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_smt_possible;
static int cap_spapr_tce;
static int cap_spapr_tce_64;
static int cap_spapr_multitce;
static int cap_spapr_vfio;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
static int cap_papr;
static int cap_htab_fd;
static int cap_fixup_hcalls;
static int cap_htm; /* Hardware transactional memory support */
static int cap_mmu_radix;
static int cap_mmu_hash_v3;
static int cap_xive;
static int cap_resize_hpt;
static int cap_ppc_pvr_compat;
static int cap_ppc_safe_cache;
static int cap_ppc_safe_bounds_check;
static int cap_ppc_safe_indirect_branch;
static int cap_ppc_count_cache_flush_assist;
static int cap_ppc_nested_kvm_hv;
static int cap_large_decr;
static int cap_fwnmi;
static int cap_rpt_invalidate;

static uint32_t debug_inst_opcode;

/*
 * Check whether we are running with KVM-PR (instead of KVM-HV). This
 * should only be used for fallback tests - generally we should use
 * explicit capabilities for the features we want, rather than
 * assuming what is/isn't available depending on the KVM variant.
 */
static bool kvmppc_is_pr(KVMState *ks)
{
    /* Assume KVM-PR if the GET_PVINFO capability is available */
    return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
}

static int kvm_ppc_register_host_cpu_type(void);
static void kvmppc_get_cpu_characteristics(KVMState *s);
static int kvmppc_get_dec_bits(void);

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /*
     * Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr()
     */
    cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
    cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
    cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
    cap_xive = kvm_vm_check_extension(s, KVM_CAP_PPC_IRQ_XIVE);
    cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
    kvmppc_get_cpu_characteristics(s);
    cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
    cap_large_decr = kvmppc_get_dec_bits();
    cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
    /*
     * Note: setting it to false because there is no such capability
     * in KVM at this moment.
     *
     * TODO: call kvm_vm_check_extension() with the right capability
     * after the kernel starts implementing it.
     */
    cap_ppc_pvr_compat = false;

    if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
        error_report("KVM: Host kernel doesn't have level irq capability");
        exit(1);
    }

    cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
    kvm_ppc_register_host_cpu_type();

    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *cenv = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /*
         * What we're really trying to say is "if we're on BookE, we
         * use the native PVR for now". This is the only sane way to
         * check it though, so we may mislead users into thinking
         * they can run BookE guests on BookS. Let's hope nobody
         * dares enough :)
         */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}

#if defined(TARGET_PPC64)
static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
{
    int ret;

    assert(kvm_state != NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        error_setg(errp, "KVM doesn't expose the MMU features it supports");
        error_append_hint(errp, "Consider switching to a newer KVM\n");
        return;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
    if (ret == 0) {
        return;
    }

    error_setg_errno(errp, -ret,
                     "KVM failed to provide the MMU features it supports");
}

struct ppc_radix_page_info *kvm_get_radix_page_info(void)
{
    KVMState *s = KVM_STATE(current_accel());
    struct ppc_radix_page_info *radix_page_info;
    struct kvm_ppc_rmmu_info rmmu_info;
    int i;

    if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
        return NULL;
    }
    if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
        return NULL;
    }
    radix_page_info = g_malloc0(sizeof(*radix_page_info));
    radix_page_info->count = 0;
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        if (rmmu_info.ap_encodings[i]) {
            radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
            radix_page_info->count++;
        }
    }
    return radix_page_info;
}

target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl)
{
    CPUState *cs = CPU(cpu);
    int ret;
    uint64_t flags = 0;
    struct kvm_ppc_mmuv3_cfg cfg = {
        .process_table = proc_tbl,
    };

    if (radix) {
        flags |= KVM_PPC_MMUV3_RADIX;
    }
    if (gtse) {
        flags |= KVM_PPC_MMUV3_GTSE;
    }
    cfg.flags = flags;
    ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EINVAL:
        return H_PARAMETER;
    case -ENODEV:
        return H_NOT_AVAILABLE;
    default:
        return H_HARDWARE;
    }
}

bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
    static struct kvm_ppc_smmu_info smmu_info;

    if (!kvm_enabled()) {
        return false;
    }

    kvm_get_smmu_info(&smmu_info, &error_fatal);
    return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
}

void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
    struct kvm_ppc_smmu_info smmu_info;
    int iq, ik, jq, jk;
    Error *local_err = NULL;

    /* For now, we only have anything to check on hash64 MMUs */
    if (!cpu->hash64_opts || !kvm_enabled()) {
        return;
    }

    kvm_get_smmu_info(&smmu_info, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
        && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        error_setg(errp,
                   "KVM does not support 1TiB segments which guest expects");
        return;
    }

    if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
        error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
                   smmu_info.slb_size, cpu->hash64_opts->slb_size);
        return;
    }

    /*
     * Verify that every pagesize supported by the cpu model is
     * supported by KVM with the same encodings
     */
    for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
        PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps;

        for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
            if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
                break;
            }
        }
        if (ik >= ARRAY_SIZE(smmu_info.sps)) {
            error_setg(errp, "KVM doesn't support base page shift %u",
                       qsps->page_shift);
            return;
        }

        ksps = &smmu_info.sps[ik];
        if (ksps->slb_enc != qsps->slb_enc) {
            error_setg(errp,
                       "KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
                       ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
            return;
        }

        for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
            for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
                if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
                    break;
                }
            }

            if (jk >= ARRAY_SIZE(ksps->enc)) {
                error_setg(errp, "KVM doesn't support page shift %u/%u",
                           qsps->enc[jq].page_shift, qsps->page_shift);
                return;
            }
            if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
                error_setg(errp,
                           "KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
                           ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
                           qsps->page_shift, qsps->enc[jq].pte_enc);
                return;
            }
        }
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Mostly what guest pagesizes we can use are related to the
         * host pages used to map guest RAM, which is handled in the
         * platform code. Cache-Inhibited largepages (64k) however are
         * used for I/O, so if they're mapped to the host at all it
         * will be a normal mapping, not a special hugepage one used
         * for RAM.
         */
        if (qemu_real_host_page_size < 0x10000) {
            error_setg(errp,
                       "KVM can't supply 64kiB CI pages, which guest expects");
        }
    }
}
#endif /* defined (TARGET_PPC64) */

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return POWERPC_CPU(cpu)->vcpu_id;
}

/*
 * e500 supports 2 h/w breakpoints and 2 watchpoints. book3s supports
 * only 1 watchpoint, so array size of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* By default no breakpoints or watchpoints are supported */
static int max_hw_breakpoint;
static int max_hw_watchpoint;
static int nb_hw_breakpoint;
static int nb_hw_watchpoint;

static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
{
    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        max_hw_breakpoint = 2;
        max_hw_watchpoint = 2;
    }

    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
        fprintf(stderr, "Error initializing h/w breakpoints\n");
        return;
    }
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /*
             * KVM-HV has transactional memory on POWER8 also without
             * the KVM_CAP_PPC_HTM extension, so enable it here
             * instead as long as it's available to userspace on the
             * host.
             */
            if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                cap_htm = true;
            }
        }
        break;
    default:
        break;
    }

    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

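/*
 * Push QEMU's shadow copy of the guest TLB to KVM, marking every
 * entry dirty so the kernel reloads the whole array.
 */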
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

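/*
 * Read one SPR from KVM into env->spr[] via the ONE_REG interface;
 * kvm_put_one_spr() below is the mirror-image write.
 */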
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
    } else {
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            env->spr[spr] = val.u32;
            break;

        case KVM_REG_SIZE_U64:
            env->spr[spr] = val.u64;
            break;

        default:
            /* Don't handle this size yet */
            abort();
        }
    }
}

static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        val.u32 = env->spr[spr];
        break;

    case KVM_REG_SIZE_U64:
        val.u64 = env->spr[spr];
        break;

    default:
        /* Don't handle this size yet */
        abort();
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_set(spr, strerror(errno));
    }
}

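/*
 * Sync the floating point, VSX and Altivec state to KVM. An FPR is
 * transferred as the low doubleword of a 128-bit VSR, so host
 * endianness decides which half of vsr[] carries the FPR value.
 */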
static int kvm_put_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

#ifdef HOST_WORDS_BIGENDIAN
            vsr[0] = float64_val(*fpr);
            vsr[1] = *vsrl;
#else
            vsr[0] = *vsrl;
            vsr[1] = float64_val(*fpr);
#endif
            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_set(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

static int kvm_get_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_get(strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            } else {
#ifdef HOST_WORDS_BIGENDIAN
                *fpr = vsr[0];
                if (vsx) {
                    *vsrl = vsr[1];
                }
#else
                *fpr = vsr[1];
                if (vsx) {
                    *vsrl = vsr[0];
                }
#endif
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_get(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_get(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

#if defined(TARGET_PPC64)
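/*
 * Fetch the sPAPR Virtual Processor Area, SLB shadow and dispatch
 * trace log registration state from KVM.
 */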
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_vpa_addr_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_get(strerror(errno));
        return ret;
    }

    return 0;
}

static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    /*
     * SLB shadow or DTL can't be registered unless a master VPA is
     * registered. That means when restoring state, if a VPA *is*
     * registered, we need to set that up first. If not, we need to
     * deregister the others before deregistering the master VPA.
     */
    assert(spapr_cpu->vpa_addr
           || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));

    if (spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_set(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_set(strerror(errno));
        return ret;
    }

    if (!spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_null_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    return 0;
}
#endif /* TARGET_PPC64 */

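/* Push the Book3S segment state (SDR1, SLB, SRs, BATs) to KVM. */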
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int i;

    sregs.pvr = env->spr[SPR_PVR];

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
    } else {
        sregs.u.s.sdr1 = env->spr[SPR_SDR1];
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
        if (env->slb[i].esid & SLB_ESID_V) {
            sregs.u.s.ppc64.slb[i].slbe |= i;
        }
        sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        sregs.u.s.ppc32.sr[i] = env->sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Beware. We have to swap upper and lower bits here */
        sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
            | env->DBAT[1][i];
        sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
            | env->IBAT[1][i];
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr = env->lr;
    regs.xer = cpu_read_xer(env);
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.cr = 0;
    for (i = 0; i < 8; i++) {
        regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    kvm_put_fp(cs);

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        ret = kvmppc_put_books_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /*
         * We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate.
         */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_put_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (msr_ts) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_put_vpa(cs) < 0) {
                trace_kvm_failed_put_vpa();
            }
        }

        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);

        if (level > KVM_PUT_RUNTIME_STATE) {
            kvm_put_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
        }
#endif /* TARGET_PPC64 */
    }

    return ret;
}

static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
{
    env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
}

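/* Pull the BookE (embedded) sregs from KVM back into the CPU state. */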
static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    if (sregs.u.e.features & KVM_SREGS_E_BASE) {
        env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
        env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
        env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
        env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
        env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
        env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
        env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
        env->spr[SPR_DECR] = sregs.u.e.dec;
        env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
        env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
        env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
    }

    if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
        env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
        env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
        env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
        env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
        env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_64) {
        env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
        env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
    }

    if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
        env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
        kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
        env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
        kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
        env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
        kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
        env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
        kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
        env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
        kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
        env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
        kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
        env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
        kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
        env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
        kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
        env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
        kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
        env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
        kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
        env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
        kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
        env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
        kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
        env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
        kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
        env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
        kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
        env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
        kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
        env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
        kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);

        if (sregs.u.e.features & KVM_SREGS_E_SPE) {
            env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
            kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
            env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
            kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
            env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
        }

        if (sregs.u.e.features & KVM_SREGS_E_PM) {
            env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
        }

        if (sregs.u.e.features & KVM_SREGS_E_PC) {
            env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
            kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
            env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
        }
    }

    if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
        env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
        env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
        env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
        env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
        env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
        env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
        env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
        env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
        env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
        env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
    }

    if (sregs.u.e.features & KVM_SREGS_EXP) {
        env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_PD) {
        env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
        env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
    }

    if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
        env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
        env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
        env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

        if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
            env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
            env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
        }
    }

    return 0;
}

static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sregs.u.s.sdr1);
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    /*
     * The packed SLB array we get from KVM_GET_SREGS only contains
     * information about valid entries. So we flush our internal copy
     * to get rid of stale ones, then put all valid SLB entries back
     * in.
     */
    memset(env->slb, 0, sizeof(env->slb));
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
        target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
        /*
         * Only restore valid entries
         */
        if (rb & SLB_ESID_V) {
            ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
        }
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        env->sr[i] = sregs.u.s.ppc32.sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
        env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
        env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
        env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
    }

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    cpu_write_xer(env, regs.xer);
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    kvm_get_fp(cs);

    if (cap_booke_sregs) {
        ret = kvmppc_get_booke_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_segstate) {
        ret = kvmppc_get_books_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_hior) {
        kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /*
         * We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate.
         */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_get_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (msr_ts) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_get_vpa(cs) < 0) {
                trace_kvm_failed_get_vpa();
            }
        }

        kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
        kvm_get_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
#endif
    }

    return 0;
}

int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset) {
        return 0;
    }

    kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);

    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    return;
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

static int kvmppc_handle_halt(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
    }

    return 0;
}

/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUPPCState *env,
                                  uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

static int kvmppc_handle_dcr_write(CPUPPCState *env,
                                   uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

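/*
 * Software breakpoints work by replacing the guest instruction with
 * the trap opcode KVM reported in KVM_REG_PPC_DEBUG_INST; the
 * original instruction is saved so it can be restored on removal.
 */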
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    /* Mixed endian case is not handled */
    uint32_t sc = debug_inst_opcode;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t sc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
        sc != debug_inst_opcode ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}

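/*
 * Hardware breakpoints and watchpoints share one table; a linear
 * scan is fine for at most MAX_HW_BKPTS entries.
 */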
static int find_hw_breakpoint(target_ulong addr, int type)
{
    int n;

    assert((nb_hw_breakpoint + nb_hw_watchpoint)
           <= ARRAY_SIZE(hw_debug_points));

    for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
        if (hw_debug_points[n].addr == addr &&
            hw_debug_points[n].type == type) {
            return n;
        }
    }

    return -1;
}

static int find_hw_watchpoint(target_ulong addr, int *flag)
{
    int n;

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
    if (n >= 0) {
        *flag = BP_MEM_ACCESS;
        return n;
    }

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
    if (n >= 0) {
        *flag = BP_MEM_WRITE;
        return n;
    }

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
    if (n >= 0) {
        *flag = BP_MEM_READ;
        return n;
    }

    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
        return -ENOBUFS;
    }

    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;

    switch (type) {
    case GDB_BREAKPOINT_HW:
        if (nb_hw_breakpoint >= max_hw_breakpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_breakpoint++;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        if (nb_hw_watchpoint >= max_hw_watchpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_watchpoint++;
        break;

    default:
        return -ENOSYS;
    }

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, type);
    if (n < 0) {
        return -ENOENT;
    }

    switch (type) {
    case GDB_BREAKPOINT_HW:
        nb_hw_breakpoint--;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        nb_hw_watchpoint--;
        break;

    default:
        return -ENOSYS;
    }
    hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = nb_hw_watchpoint = 0;
}

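/* Translate QEMU's breakpoint state into the kvm_guest_debug layout. */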
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    int n;

    /* Software Breakpoint updates */
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }

    assert((nb_hw_breakpoint + nb_hw_watchpoint)
           <= ARRAY_SIZE(hw_debug_points));
    assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));

    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
        for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
            switch (hw_debug_points[n].type) {
            case GDB_BREAKPOINT_HW:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
                break;
            case GDB_WATCHPOINT_WRITE:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
                break;
            case GDB_WATCHPOINT_READ:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
                break;
            case GDB_WATCHPOINT_ACCESS:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
                                       KVMPPC_DEBUG_WATCH_READ;
                break;
            default:
                cpu_abort(cs, "Unsupported breakpoint type\n");
            }
            dbg->arch.bp[n].addr = hw_debug_points[n].addr;
        }
    }
}

static int kvm_handle_hw_breakpoint(CPUState *cs,
                                    struct kvm_debug_exit_arch *arch_info)
{
    int handle = DEBUG_RETURN_GUEST;
    int n;
    int flag = 0;

    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
        if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
            n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
            if (n >= 0) {
                handle = DEBUG_RETURN_GDB;
            }
        } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
                                        KVMPPC_DEBUG_WATCH_WRITE)) {
            n = find_hw_watchpoint(arch_info->address, &flag);
            if (n >= 0) {
                handle = DEBUG_RETURN_GDB;
                cs->watchpoint_hit = &hw_watchpoint;
                hw_watchpoint.vaddr = hw_debug_points[n].addr;
                hw_watchpoint.flags = flag;
            }
        }
    }
    return handle;
}

static int kvm_handle_singlestep(void)
{
    return DEBUG_RETURN_GDB;
}

static int kvm_handle_sw_breakpoint(void)
{
    return DEBUG_RETURN_GDB;
}

static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    if (cs->singlestep_enabled) {
        return kvm_handle_singlestep();
    }

    if (arch_info->status) {
        return kvm_handle_hw_breakpoint(cs, arch_info);
    }

    if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
        return kvm_handle_sw_breakpoint();
    }

    /*
     * QEMU is not able to handle this debug exception, so inject a
     * program exception into the guest - yes, a program exception,
     * NOT a debug exception!
     * When QEMU is using the debug resources, the debug exception
     * must always be set; to achieve this we set MSR_DE and also
     * MSRP_DEP so the guest cannot change MSR_DE. When emulating
     * debug resources for the guest, we instead want the guest to
     * control MSR_DE (enable/disable the debug interrupt as needed).
     * Supporting both configurations at once is not possible, so
     * debug resources cannot be shared between QEMU and the guest on
     * the BookE architecture. In the current design QEMU takes
     * priority over the guest: if QEMU is using the debug resources,
     * the guest cannot use them.
     * For software breakpoints QEMU uses a privileged instruction,
     * so we cannot be here because of a guest-set debug exception;
     * the only possibility is that the guest executed a privileged
     * or illegal instruction, and that is why we inject a program
     * interrupt.
     */
    cpu_synchronize_state(cs);
    /*
     * env->nip is PC, so increment this by 4 to use
     * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
     */
    env->nip += 4;
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_INVAL;
    ppc_cpu_do_interrupt(cs);

    return DEBUG_RETURN_GUEST;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    qemu_mutex_lock_iothread();

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            trace_kvm_handle_dcr_write();
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            trace_kvm_handle_dcr_read();
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        trace_kvm_handle_halt();
        ret = kvmppc_handle_halt(cpu);
        break;
#if defined(TARGET_PPC64)
    case KVM_EXIT_PAPR_HCALL:
        trace_kvm_handle_papr_hcall();
        run->papr_hcall.ret = spapr_hypercall(cpu,
                                              run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 0;
        break;
#endif
    case KVM_EXIT_EPR:
        trace_kvm_handle_epr();
        run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
        ret = 0;
        break;
    case KVM_EXIT_WATCHDOG:
        trace_kvm_handle_watchdog_expiry();
        watchdog_perform_action();
        ret = 0;
        break;

    case KVM_EXIT_DEBUG:
        trace_kvm_handle_debug_exception();
        if (kvm_handle_debug(cpu, run)) {
            ret = EXCP_DEBUG;
            break;
        }
        /* re-enter, this exception was guest-internal */
        ret = 0;
        break;

#if defined(TARGET_PPC64)
    case KVM_EXIT_NMI:
        trace_kvm_handle_nmi_exception();
        ret = kvm_handle_nmi(cpu, run);
        break;
#endif

    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    qemu_mutex_unlock_iothread();
    return ret;
}

int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    CPUState *cs = CPU(cpu);
    uint32_t bits = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_OR_TSR,
        .addr = (uintptr_t) &bits,
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    CPUState *cs = CPU(cpu);
    uint32_t bits = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_CLEAR_TSR,
        .addr = (uintptr_t) &bits,
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint32_t tcr = env->spr[SPR_BOOKE_TCR];

    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_TCR,
        .addr = (uintptr_t) &tcr,
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (!kvm_enabled()) {
        return -1;
    }

    if (!cap_ppc_watchdog) {
        printf("warning: KVM does not support watchdog\n");
        return -1;
    }

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    return ret;
}

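/* Scan /proc/cpuinfo for the given field name and copy out its line. */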
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            pstrcpy(value, len, line);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}

static uint32_t kvmppc_get_tbfreq_procfs(void)
{
    char line[512];
    char *ns;
    uint32_t tbfreq_fallback = NANOSECONDS_PER_SECOND;
    uint32_t tbfreq_procfs;

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return tbfreq_fallback;
    }

    ns = strchr(line, ':');
    if (!ns) {
        return tbfreq_fallback;
    }

    tbfreq_procfs = atoi(++ns);

    /* 0 is certainly not acceptable by the guest, return fallback value */
    return tbfreq_procfs ? tbfreq_procfs : tbfreq_fallback;
}

uint32_t kvmppc_get_tbfreq(void)
{
    static uint32_t cached_tbfreq;

    if (!cached_tbfreq) {
        cached_tbfreq = kvmppc_get_tbfreq_procfs();
    }

    return cached_tbfreq;
}

bool kvmppc_get_host_serial(char **value)
{
    return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
                               NULL);
}

bool kvmppc_get_host_model(char **value)
{
    return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
}

/* Try to find a device tree node for a CPU with clock-frequency property */
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
{
    struct dirent *dirp;
    DIR *dp;

    dp = opendir(PROC_DEVTREE_CPU);
    if (!dp) {
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
        return -1;
    }

    buf[0] = '\0';
    while ((dirp = readdir(dp)) != NULL) {
        FILE *f;
        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
                 dirp->d_name);
        f = fopen(buf, "r");
        if (f) {
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
            fclose(f);
            break;
        }
        buf[0] = '\0';
    }
    closedir(dp);
    if (buf[0] == '\0') {
        printf("Unknown host!\n");
        return -1;
    }

    return 0;
}

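/*
 * Read a device tree integer property: 4 bytes are decoded as a
 * big-endian u32, 8 bytes as a big-endian u64.
 */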
static uint64_t kvmppc_read_int_dt(const char *filename)
{
    union {
        uint32_t v32;
        uint64_t v64;
    } u;
    FILE *f;
    int len;

    f = fopen(filename, "rb");
    if (!f) {
        return -1;
    }

    len = fread(&u, 1, sizeof(u), f);
    fclose(f);
    switch (len) {
    case 4:
        /* property is a 32-bit quantity */
        return be32_to_cpu(u.v32);
    case 8:
        return be64_to_cpu(u.v64);
    }

    return 0;
}

/*
 * Read a CPU node property from the host device tree that's a single
 * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
 * (can't find or open the property, or doesn't understand the format)
 */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
    char buf[PATH_MAX], *tmp;
    uint64_t val;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return -1;
    }

    tmp = g_strdup_printf("%s/%s", buf, propname);
    val = kvmppc_read_int_dt(tmp);
    g_free(tmp);

    return val;
}

uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}

static int kvmppc_get_dec_bits(void)
{
    int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");

    if (nr_bits > 0) {
        return nr_bits;
    }
    return 0;
}

static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
{
    CPUState *cs = env_cpu(env);

    if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
        return 0;
    }

    return 1;
}

int kvmppc_get_hasidle(CPUPPCState *env)
{
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo) &&
        (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
        return 1;
    }

    return 0;
}

int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t *)buf;
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);
        return 0;
    }

    /*
     * Fallback to always fail hypercalls regardless of endianness:
     *
     *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
     *     li r3, -1
     *     b .+8       (becomes nop in wrong endian)
     *     bswap32(li r3, -1)
     */

    hc[0] = cpu_to_be32(0x08000048);
    hc[1] = cpu_to_be32(0x3860ffff);
    hc[2] = cpu_to_be32(0x48000008);
    hc[3] = cpu_to_be32(bswap32(0x3860ffff));

    return 1;
}

static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
{
    return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
}

void kvmppc_enable_logical_ci_hcalls(void)
{
    /*
     * FIXME: it would be nice if we could detect the cases where
     * we're using a device which requires the in-kernel
     * implementation of these hcalls but the kernel lacks them, and
     * produce a warning in that case.
     */
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
}

void kvmppc_enable_set_mode_hcall(void)
{
    kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}

void kvmppc_enable_clear_ref_mod_hcalls(void)
{
    kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
    kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
}

void kvmppc_enable_h_page_init(void)
{
    kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
}

void kvmppc_enable_h_rpt_invalidate(void)
{
    kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE);
}

void kvmppc_set_papr(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (!kvm_enabled()) {
        return;
    }

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
    if (ret) {
        error_report("This vCPU type or KVM version does not support PAPR");
        exit(1);
    }

    /*
     * Update the capability flag so we sync the right information
     * with kvm
     */
    cap_papr = 1;
}

int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
}

void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
    CPUState *cs = CPU(cpu);
    int ret;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
    if (ret && mpic_proxy) {
        error_report("This KVM version does not support EPR");
        exit(1);
    }
}

bool kvmppc_get_fwnmi(void)
{
    return cap_fwnmi;
}

int kvmppc_set_fwnmi(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
}

int kvmppc_smt_threads(void)
{
    return cap_ppc_smt ? cap_ppc_smt : 1;
}

int kvmppc_set_smt_threads(int smt)
{
    int ret;

    ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
    if (!ret) {
        cap_ppc_smt = smt;
    }
    return ret;
}

void kvmppc_error_append_smt_possible_hint(Error *const *errp)
{
    int i;
    GString *g;
    char *s;

    assert(kvm_enabled());
    if (cap_ppc_smt_possible) {
        g = g_string_new("Available VSMT modes:");
        for (i = 63; i >= 0; i--) {
            if ((1UL << i) & cap_ppc_smt_possible) {
                g_string_append_printf(g, " %lu", (1UL << i));
            }
        }
        s = g_string_free(g, false);
        error_append_hint(errp, "%s.\n", s);
        g_free(s);
    } else {
        error_append_hint(errp,
                          "This KVM seems to be too old to support VSMT.\n");
    }
}

#ifdef TARGET_PPC64
uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
{
    struct kvm_ppc_smmu_info info;
    long rampagesize, best_page_shift;
    int i;

    /*
     * Find the largest hardware supported page size that's less than
     * or equal to the (logical) backing page size of guest RAM
     */
    kvm_get_smmu_info(&info, &error_fatal);
    rampagesize = qemu_minrampagesize();
    best_page_shift = 0;

    for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
        struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];

        if (!sps->page_shift) {
            continue;
        }

        if ((sps->page_shift > best_page_shift)
            && ((1UL << sps->page_shift) <= rampagesize)) {
            best_page_shift = sps->page_shift;
        }
    }

    return 1ULL << (best_page_shift + hash_shift - 7);
}
#endif

2173 bool kvmppc_spapr_use_multitce(void)
2175 return cap_spapr_multitce;
2178 int kvmppc_spapr_enable_inkernel_multitce(void)
2180 int ret;
2182 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
2183 H_PUT_TCE_INDIRECT, 1);
2184 if (!ret) {
2185 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
2186 H_STUFF_TCE, 1);
2189 return ret;

void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio)
{
    long len;
    int fd;
    void *table;

    /*
     * Must set fd to -1 so we don't try to munmap when called for
     * destroying the table, which the upper layers -will- do
     */
    *pfd = -1;
    if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
        return NULL;
    }

    if (cap_spapr_tce_64) {
        struct kvm_create_spapr_tce_64 args = {
            .liobn = liobn,
            .page_shift = page_shift,
            .offset = bus_offset >> page_shift,
            .size = nb_table,
            .flags = 0
        };
        fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
        if (fd < 0) {
            fprintf(stderr,
                    "KVM: Failed to create TCE64 table for liobn 0x%x\n",
                    liobn);
            return NULL;
        }
    } else if (cap_spapr_tce) {
        uint64_t window_size = (uint64_t) nb_table << page_shift;
        struct kvm_create_spapr_tce args = {
            .liobn = liobn,
            .window_size = window_size,
        };
        if ((window_size != args.window_size) || bus_offset) {
            return NULL;
        }
        fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
        if (fd < 0) {
            fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
                    liobn);
            return NULL;
        }
    } else {
        return NULL;
    }

    len = nb_table * sizeof(uint64_t);
    /* FIXME: round this up to page size */

    table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (table == MAP_FAILED) {
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
                liobn);
        close(fd);
        return NULL;
    }

    *pfd = fd;
    return table;
}
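
/*
 * Usage sketch (illustrative, with hypothetical values): callers
 * allocate a DMA window and later tear it down, e.g. for a 1 GiB
 * window with 4 KiB TCE pages:
 *
 *     int fd;
 *     uint32_t nb_table = (1U << 30) >> 12;
 *     uint64_t *tbl = kvmppc_create_spapr_tce(liobn, 12, 0, nb_table,
 *                                             &fd, false);
 *     ...
 *     kvmppc_remove_spapr_tce(tbl, fd, nb_table);
 *
 * A NULL return means the caller must fall back to a
 * userspace-managed table.
 */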

int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
{
    long len;

    if (fd < 0) {
        return -1;
    }

    len = nb_table * sizeof(uint64_t);
    if ((munmap(table, len) < 0) ||
        (close(fd) < 0)) {
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
                strerror(errno));
        /* Leak the table */
    }

    return 0;
}

int kvmppc_reset_htab(int shift_hint)
{
    uint32_t shift = shift_hint;

    if (!kvm_enabled()) {
        /* Full emulation, tell caller to allocate htab itself */
        return 0;
    }
    if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
        int ret;
        ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
        if (ret == -ENOTTY) {
            /*
             * At least some versions of PR KVM advertise the
             * capability, but don't implement the ioctl().  Oops.
             * Return 0 so that we allocate the htab in qemu, as is
             * correct for PR.
             */
            return 0;
        } else if (ret < 0) {
            return ret;
        }
        return shift;
    }

    /*
     * We have a kernel that predates the htab reset calls.  For PR
     * KVM, we need to allocate the htab ourselves, for an HV KVM of
     * this era, it has allocated a 16MB fixed size hash table
     * already.
     */
    if (kvmppc_is_pr(kvm_state)) {
        /* PR - tell caller to allocate htab */
        return 0;
    } else {
        /* HV - assume 16MB kernel allocated htab */
        return 24;
    }
}
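
/*
 * Note (added for clarity): the return value is a hash table order,
 * i.e. log2 of the HPT size in bytes, so the 24 above means a
 * 1 << 24 = 16 MiB table, while 0 means "no kernel-managed HPT,
 * allocate it in QEMU".
 */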

static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}

static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    if (on) {
        *word |= flags;
    } else {
        *word &= ~flags;
    }
}

static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
    uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");

    /* Now fix up the class with information we can query from the host */
    pcc->pvr = mfpvr();

    alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
    alter_insns(&pcc->insns_flags2, PPC2_VSX,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
    alter_insns(&pcc->insns_flags2, PPC2_DFP,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);

    if (dcache_size != -1) {
        pcc->l1_dcache_size = dcache_size;
    }

    if (icache_size != -1) {
        pcc->l1_icache_size = icache_size;
    }

#if defined(TARGET_PPC64)
    pcc->radix_page_info = kvm_get_radix_page_info();

    if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
        /*
         * POWER9 DD1 has some bugs which make it not really ISA 3.00
         * compliant.  More importantly, advertising ISA 3.00
         * architected mode may prevent guests from activating
         * necessary DD1 workarounds.
         */
        pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
                                | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
    }
#endif /* defined(TARGET_PPC64) */
}

bool kvmppc_has_cap_epr(void)
{
    return cap_epr;
}

bool kvmppc_has_cap_fixup_hcalls(void)
{
    return cap_fixup_hcalls;
}

bool kvmppc_has_cap_htm(void)
{
    return cap_htm;
}

bool kvmppc_has_cap_mmu_radix(void)
{
    return cap_mmu_radix;
}

bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return cap_mmu_hash_v3;
}

static bool kvmppc_power8_host(void)
{
    bool ret = false;
#ifdef TARGET_PPC64
    {
        uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
        ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
              (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
              (base_pvr == CPU_POWERPC_POWER8_BASE);
    }
#endif /* TARGET_PPC64 */
    return ret;
}

static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
{
    bool l1d_thread_priv_req = !kvmppc_power8_host();

    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
        return 2;
    } else if ((!l1d_thread_priv_req ||
                c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
               (c.character & c.character_mask
                & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
        return 1;
    }

    return 0;
}
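
/*
 * Note on the mask logic above (added for clarity): the *_mask fields
 * say which bits KVM actually knows about, so
 * "c.character & c.character_mask & FLAG" reads as "KVM reports FLAG
 * set" and "~c.behaviour & c.behaviour_mask & FLAG" as "KVM reports
 * FLAG clear".  The 0/1/2 return values correspond to the spapr cap
 * tristate (broken / workaround available / fixed in hardware).
 */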

static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
{
    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
        return 2;
    } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
        return 1;
    }

    return 0;
}

static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
{
    if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
        (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
        (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
        return SPAPR_CAP_FIXED_NA;
    } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
        return SPAPR_CAP_WORKAROUND;
    } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
        return SPAPR_CAP_FIXED_CCD;
    } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
        return SPAPR_CAP_FIXED_IBS;
    }

    return 0;
}

static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
{
    if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
        return 1;
    }
    return 0;
}

bool kvmppc_has_cap_xive(void)
{
    return cap_xive;
}

static void kvmppc_get_cpu_characteristics(KVMState *s)
{
    struct kvm_ppc_cpu_char c;
    int ret;

    /* Assume broken */
    cap_ppc_safe_cache = 0;
    cap_ppc_safe_bounds_check = 0;
    cap_ppc_safe_indirect_branch = 0;

    ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
    if (!ret) {
        return;
    }
    ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
    if (ret < 0) {
        return;
    }

    cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
    cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
    cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
    cap_ppc_count_cache_flush_assist =
        parse_cap_ppc_count_cache_flush_assist(c);
}

int kvmppc_get_cap_safe_cache(void)
{
    return cap_ppc_safe_cache;
}

int kvmppc_get_cap_safe_bounds_check(void)
{
    return cap_ppc_safe_bounds_check;
}

int kvmppc_get_cap_safe_indirect_branch(void)
{
    return cap_ppc_safe_indirect_branch;
}

int kvmppc_get_cap_count_cache_flush_assist(void)
{
    return cap_ppc_count_cache_flush_assist;
}

bool kvmppc_has_cap_nested_kvm_hv(void)
{
    return !!cap_ppc_nested_kvm_hv;
}

int kvmppc_set_cap_nested_kvm_hv(int enable)
{
    return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
}

bool kvmppc_has_cap_spapr_vfio(void)
{
    return cap_spapr_vfio;
}

int kvmppc_get_cap_large_decr(void)
{
    return cap_large_decr;
}

int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
    CPUState *cs = CPU(cpu);
    uint64_t lpcr;

    kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
    /* Do we need to modify the LPCR? */
    if (!!(lpcr & LPCR_LD) != !!enable) {
        if (enable) {
            lpcr |= LPCR_LD;
        } else {
            lpcr &= ~LPCR_LD;
        }
        kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
        kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);

        if (!!(lpcr & LPCR_LD) != !!enable) {
            return -1;
        }
    }

    return 0;
}
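
/*
 * Illustrative sketch (hypothetical error handling): a caller would
 * typically check the host capability first and treat a failed
 * read-back as fatal for the requested configuration:
 *
 *     if (kvmppc_get_cap_large_decr() &&
 *         kvmppc_enable_cap_large_decr(cpu, 1) < 0) {
 *         error_setg(errp, "large decrementer not supported by KVM");
 *     }
 *
 * The second kvm_get_one_reg() above is what catches kernels that
 * silently ignore LPCR_LD.
 */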

int kvmppc_has_cap_rpt_invalidate(void)
{
    return cap_rpt_invalidate;
}

PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
    uint32_t host_pvr = mfpvr();
    PowerPCCPUClass *pvr_pcc;

    pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
    if (pvr_pcc == NULL) {
        pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
    }

    return pvr_pcc;
}

static void pseries_machine_class_fixup(ObjectClass *oc, void *opaque)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
}

static int kvm_ppc_register_host_cpu_type(void)
{
    TypeInfo type_info = {
        .name = TYPE_HOST_POWERPC_CPU,
        .class_init = kvmppc_host_cpu_class_init,
    };
    PowerPCCPUClass *pvr_pcc;
    ObjectClass *oc;
    DeviceClass *dc;
    int i;

    pvr_pcc = kvm_ppc_get_host_cpu_class();
    if (pvr_pcc == NULL) {
        return -1;
    }
    type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
    type_register(&type_info);
    /* override TCG default cpu type with 'host' cpu model */
    object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
                         false, NULL);

    oc = object_class_by_name(type_info.name);
    g_assert(oc);

    /*
     * Update generic CPU family class alias (e.g. on a POWER8NVL host,
     * we want "POWER8" to be a "family" alias that points to the current
     * host CPU type, too)
     */
    dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
    for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
        if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
            char *suffix;

            ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
            suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
            if (suffix) {
                *suffix = 0;
            }
            break;
        }
    }

    return 0;
}
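
/*
 * Note (added for clarity): registering this type is what makes
 * "-cpu host" available under KVM, and the alias fixup above lets a
 * bare family name (e.g. "-cpu POWER8" on a POWER8NVL host) resolve
 * to the host CPU type as well.
 */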

int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
{
    struct kvm_rtas_token_args args = {
        .token = token,
    };

    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
        return -ENOENT;
    }

    strncpy(args.name, function, sizeof(args.name) - 1);

    return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
}

int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    struct kvm_get_htab_fd s = {
        .flags = write ? KVM_GET_HTAB_WRITE : 0,
        .start_index = index,
    };
    int ret;

    if (!cap_htab_fd) {
        error_setg(errp, "KVM version doesn't support %s the HPT",
                   write ? "writing" : "reading");
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
    if (ret < 0) {
        error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
                   write ? "writing" : "reading", write ? "to" : "from",
                   strerror(errno));
        return -errno;
    }

    return ret;
}
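
/*
 * Usage sketch (illustrative): migration code obtains a read-only fd
 * starting at index 0 and then streams the whole table:
 *
 *     Error *local_err = NULL;
 *     int fd = kvmppc_get_htab_fd(false, 0, &local_err);
 *     if (fd >= 0) {
 *         kvmppc_save_htab(f, fd, 16384, max_ns);
 *     }
 *
 * The 16384-byte buffer size is a hypothetical choice, not a
 * requirement of the interface.
 */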

int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    uint8_t buf[bufsize];
    ssize_t rc;

    do {
        rc = read(fd, buf, bufsize);
        if (rc < 0) {
            fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
                    strerror(errno));
            return rc;
        } else if (rc) {
            uint8_t *buffer = buf;
            ssize_t n = rc;
            while (n) {
                struct kvm_get_htab_header *head =
                    (struct kvm_get_htab_header *) buffer;
                size_t chunksize = sizeof(*head) +
                    HASH_PTE_SIZE_64 * head->n_valid;

                qemu_put_be32(f, head->index);
                qemu_put_be16(f, head->n_valid);
                qemu_put_be16(f, head->n_invalid);
                qemu_put_buffer(f, (void *)(head + 1),
                                HASH_PTE_SIZE_64 * head->n_valid);

                buffer += chunksize;
                n -= chunksize;
            }
        }
    } while ((rc != 0)
             && ((max_ns < 0) ||
                 ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));

    return (rc == 0) ? 1 : 0;
}
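
/*
 * Note on the chunk format (added for clarity): each chunk read from
 * the kernel fd is a struct kvm_get_htab_header (index, n_valid,
 * n_invalid) followed by n_valid * HASH_PTE_SIZE_64 bytes of HPTE
 * data, so a chunk with n_valid = 2 occupies
 * sizeof(header) + 2 * HASH_PTE_SIZE_64 bytes.  The loop above
 * forwards the header fields big-endian plus the raw payload to the
 * migration stream, which is what kvmppc_load_htab_chunk() below
 * reassembles on the destination.
 */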

int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid, Error **errp)
{
    struct kvm_get_htab_header *buf;
    size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
    ssize_t rc;

    buf = alloca(chunksize);
    buf->index = index;
    buf->n_valid = n_valid;
    buf->n_invalid = n_invalid;

    qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);

    rc = write(fd, buf, chunksize);
    if (rc < 0) {
        error_setg_errno(errp, errno, "Error writing the KVM hash table");
        return -errno;
    }
    if (rc != chunksize) {
        /* We should never get a short write on a single chunk */
        error_setg(errp, "Short write while restoring the KVM hash table");
        return -ENOSPC;
    }
    return 0;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
{
    int fd, rc;
    int i;

    fd = kvmppc_get_htab_fd(false, ptex, &error_abort);

    i = 0;
    while (i < n) {
        struct kvm_get_htab_header *hdr;
        int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
        char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];

        rc = read(fd, buf, sizeof(buf));
        if (rc < 0) {
            hw_error("kvmppc_read_hptes: Unable to read HPTEs");
        }

        hdr = (struct kvm_get_htab_header *)buf;
        while ((i < n) && ((char *)hdr < (buf + rc))) {
            int invalid = hdr->n_invalid, valid = hdr->n_valid;

            if (hdr->index != (ptex + i)) {
                hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
                         " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i);
            }

            if (n - i < valid) {
                valid = n - i;
            }
            memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
            i += valid;

            if ((n - i) < invalid) {
                invalid = n - i;
            }
            memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
            i += invalid;

            hdr = (struct kvm_get_htab_header *)
                ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
        }
    }

    close(fd);
}

void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    int fd, rc;
    struct {
        struct kvm_get_htab_header hdr;
        uint64_t pte0;
        uint64_t pte1;
    } buf;

    fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);

    buf.hdr.n_valid = 1;
    buf.hdr.n_invalid = 0;
    buf.hdr.index = ptex;
    buf.pte0 = cpu_to_be64(pte0);
    buf.pte1 = cpu_to_be64(pte1);

    rc = write(fd, &buf, sizeof(buf));
    if (rc != sizeof(buf)) {
        hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
    }
    close(fd);
}
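
/*
 * Illustrative example (hypothetical values): invalidating an HPTE
 * amounts to a single-entry write with both words zero,
 *
 *     kvmppc_write_hpte(ptex, 0, 0);
 *
 * while installing one passes the architected pte0/pte1 pair; the
 * cpu_to_be64() calls above match the big-endian layout the kernel
 * uses for HPT entries.
 */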

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    return data & 0xffff;
}

#if defined(TARGET_PPC64)
int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
{
    uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;

    cpu_synchronize_state(CPU(cpu));

    spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);

    return 0;
}
#endif

int kvmppc_enable_hwrng(void)
{
    if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
        return -1;
    }

    return kvmppc_enable_hcall(kvm_state, H_RANDOM);
}

void kvmppc_check_papr_resize_hpt(Error **errp)
{
    if (!kvm_enabled()) {
        return; /* No KVM, we're good */
    }

    if (cap_resize_hpt) {
        return; /* Kernel has explicit support, we're good */
    }

    /* Otherwise fallback on looking for PR KVM */
    if (kvmppc_is_pr(kvm_state)) {
        return;
    }

    error_setg(errp,
               "Hash page table resizing not available with this KVM version");
}

int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
{
    CPUState *cs = CPU(cpu);
    struct kvm_ppc_resize_hpt rhpt = {
        .flags = flags,
        .shift = shift,
    };

    if (!cap_resize_hpt) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
}

int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
{
    CPUState *cs = CPU(cpu);
    struct kvm_ppc_resize_hpt rhpt = {
        .flags = flags,
        .shift = shift,
    };

    if (!cap_resize_hpt) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
}
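
/*
 * Protocol sketch (illustrative): resizing is a two-phase operation
 * driven by the guest's H_RESIZE_HPT_PREPARE / H_RESIZE_HPT_COMMIT
 * hypercalls.  Growing to a 1 GiB HPT (shift = 30) would look roughly
 * like:
 *
 *     rc = kvmppc_resize_hpt_prepare(cpu, flags, 30);
 *     ...retry while the kernel reports the preparation as busy...
 *     rc = kvmppc_resize_hpt_commit(cpu, flags, 30);
 *
 * The retry loop and error handling are elided here.
 */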

/*
 * This is a helper function to detect a post migration scenario
 * in which a guest, running as KVM-HV, freezes in cpu_post_load because
 * the guest kernel can't handle a PVR value other than the actual host
 * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
 *
 * If we don't have cap_ppc_pvr_compat and we're not running in PR
 * (so, we're HV), return true. The workaround itself is done in
 * cpu_post_load.
 *
 * The order here is important: we'll only check for KVM PR as a
 * fallback if the guest kernel can't handle the situation itself.
 * We need to avoid as much as possible querying the running KVM type
 * in QEMU level.
 */
bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    if (!kvm_enabled()) {
        return false;
    }

    if (cap_ppc_pvr_compat) {
        return false;
    }

    return !kvmppc_is_pr(cs->kvm_state);
}

void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
{
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
    }
}

void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
{
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
    }
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}