/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "hw/arm/virt.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time when debugging kernels
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 *
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface.
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */

static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}
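
/*
 * Worked example (illustrative): after the two deposits above the
 * final control value is
 *
 *   bcr = 0x1 | (0x3 << 1) | (0xf << 5) = 0x1e7
 *
 * and sextract64(addr, 0, 53) keeps VA[52:0] while sign-extending
 * bit 52, so a kernel address such as 0xffff800010001000 round-trips
 * unchanged and later PC comparisons see the canonical form.
 */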

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */

static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of area
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31  29 28   24 23  21  20  19 16 15 14  13   12  5 4    3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 3 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (powers
 * of 2) you need to ensure you mask the address as required and set
 * BAS=0xff to cover the whole region.
 */

static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }
    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }
    g_array_append_val(hw_watchpoints, wp);
    return 0;
}
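
/*
 * Worked example for the BAS path above (illustrative): a 2-byte
 * watchpoint at addr = 0x1006 gives wvr = 0x1000 (addr & ~7),
 * off = 6 and bas = 0b11, which deposit32() places at bits [12:11],
 * i.e. the BAS bits for bytes 6 and 7 of the doubleword at 0x1000.
 * For a write watchpoint the resulting wcr is 0x1817
 * (E=1, PAC=0b11, LSC=0b10, BAS=0b11000000).
 */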

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}
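
/*
 * For example (illustrative): a 16-byte watchpoint inserted above
 * stores MASK = ctz64(16) = 4, so here mask = 4 and the matched
 * range is the 1 << 4 = 16 bytes from the masked base address; the
 * BAS branch only applies to MASK=0 (sub-doubleword) watchpoints.
 */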

/**
 * delete_hw_watchpoint()
 * @addr: address of watch point
 *
 * Delete a watchpoint and shuffle any above down
 */

static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
                                    const char *name)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&ipa,
    };

    if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
        error_report("failed to init PVTIME IPA");
        abort();
    }
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}
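
/*
 * The KVM arm64 ABI exposes every system register as a 64-bit value,
 * hence the KVM_REG_SIZE_U64 asserts above: read_sys_reg32() reads
 * into a 64-bit temporary and truncates, which is how the 32-bit
 * AArch32 ID registers are fetched below.
 */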

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    uint64_t features = 0;
    uint64_t t;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                              ARM64_SYS_REG(3, 3, 9, 12, 0));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
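        /*
         * The BRPS/WRPS/CTX_CMPS fields use the architectural
         * "count minus one" encoding (e.g. BRPS = 5 advertises 6
         * breakpoints); DBGDIDR uses the same convention, which is
         * why the values can be copied across unchanged below.
         */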
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }
    }

    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Add feature bits that can't appear until after VCPU init. */
    if (sve_supported) {
        t = ahcf->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        ahcf->isar.id_aa64pfr0 = t;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}

void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    bool has_steal_time = kvm_arm_steal_time_supported();

    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
        } else {
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
        }
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
        if (!has_steal_time) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "on this host");
            return;
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            /*
             * DEN0057A chapter 2 says "This specification only covers
             * systems in which the Execution state of the hypervisor
             * as well as EL1 of virtual machines is AArch64.". And,
             * to ensure that, the smc/hvc calls are only specified as
             * smc64/hvc64.
             */
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "for AArch32 guests");
            return;
        }
    }
}

bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}

bool kvm_arm_steal_time_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i, j;

    bitmap_clear(map, 0, ARM_MAX_VQ);

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
        }
    }

    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
        if (!vls[i]) {
            continue;
        }
        for (j = 1; j <= 64; ++j) {
            vq = j + i * 64;
            if (vq > ARM_MAX_VQ) {
                return;
            }
            if (vls[i] & (1UL << (j - 1))) {
                set_bit(vq - 1, map);
            }
        }
    }
}
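
/*
 * The KVM_REG_ARM64_SVE_VLS bitmap decodes as: bit (j - 1) of vls[i]
 * set means vector length vq = j + i * 64 quadwords (i.e. vq * 128
 * bits) is supported. For example a host offering 128, 256 and
 * 512-bit vectors reports vls[0] = 0b1011 (vq = 1, 2 and 4).
 */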

static int kvm_arm_sve_set_vls(CPUState *cs)
{
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };
    ARMCPU *cpu = ARM_CPU(cs);
    uint32_t vq;
    int i, j;

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            i = (vq - 1) / 64;
            j = (vq - 1) % 64;
            vls[i] |= 1UL << j;
        }
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
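
/*
 * This is the inverse of the decoding in kvm_arm_sve_get_vls(): for
 * example, a CPU with sve_max_vq = 4 and all of vq 1-4 present in
 * sve_vq_map writes vls[0] = 0b1111, restricting the vCPU to
 * 128-512 bit vectors.
 */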

#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

/* Callers must hold the iothread mutex lock */
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid,
     * this will tell guest the FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}
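
/*
 * In the syn_data_abort_no_iss() call above, the 1 sets FnV (FAR not
 * valid) and the trailing 0x10 is the DFSC encoding for a synchronous
 * external abort, exactly as the comment in the function body
 * describes.
 */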

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
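
/*
 * QEMU stores each 128-bit Q register as two host-order uint64_t
 * values with q[0] holding the low 64 bits; the #ifdef above swaps
 * the halves on big-endian hosts so the buffer handed to KVM has the
 * layout the kernel register ABI expects.
 */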

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
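    /*
     * i.e. KVM's spsr[0] is SPSR_EL1 (SPSR_svc in AArch32 terms) and
     * is stored in banked_spsr[1], with the abt/und/irq/fiq banks
     * following in order up to banked_spsr[5].
     */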
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;
    Object *obj = qdev_get_machine();
    VirtMachineState *vms = VIRT_MACHINE(obj);
    bool acpi_enabled = virt_is_acpi_enabled(vms);

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_enabled && addr &&
            object_property_get_bool(obj, "ras", NULL)) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO. In that case we might either be being
             * called synchronously from the vCPU thread, or a bit
             * later from the main thread, so doing the injection of
             * the error would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;
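
/*
 * For reference: the A64 BRK encoding is 0xd4200000 | (imm16 << 5),
 * so this constant is BRK #0.
 */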

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}

#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

/*
 * ESR_EL1
 * ISS encoding
 * AARCH64: DFSC,   bits [5:0]
 * AARCH32:
 *      TTBCR.EAE == 0
 *          FS[4]   - DFSR[10]
 *          FS[3:0] - DFSR[3:0]
 *      TTBCR.EAE == 1
 *          FS, bits [5:0]
 */
#define ESR_DFSC(aarch64, lpae, v)        \
    ((aarch64 || (lpae)) ? ((v) & 0x3F)   \
               : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
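
/*
 * Worked example (illustrative): a short-descriptor (EAE == 0)
 * external abort has DFSR FS = 0b01000, i.e. DFSR[3:0] = 0x8 and
 * DFSR[10] = 0, so ESR_DFSC(0, 0, 0x8) yields 0x8, which matches
 * ESR_DFSC_EXTABT(0, 0). For AArch64 or LPAE the DFSC is taken
 * directly from bits [5:0] and the external abort code is 0x10.
 */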

bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                       && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification here is based on the DFSC bits
         * of the ESR_EL1 reg only
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}