/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps   (hw_watchpoints->len)
#define cur_hw_bps   (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface.
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31   24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63    53 52       49 48          2  1 0
 * +--------+-----------+-------------+-----+
 * |  RESS  | VA[52:49] |  VA[48:2]   | 0 0 |
 * +--------+-----------+-------------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */
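
/*
 * For example, assuming a canonical kernel address such as
 * 0xffff800008000000 (bit 52 set): sextract64(addr, 0, 53) leaves it
 * unchanged, while the same address handed to us with bits [63:53]
 * clear has them restored by the sign extension, so the stored BVR
 * matches the PC the guest actually executes at.
 */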
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */
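    /*
     * The resulting control value is 0x1e7: E=1 (bit 0), PMC=0b11
     * (bits 2:1) and BAS=0xf (bits 8:5).
     */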
    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;

    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of area
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31 29 28   24 23  21  20  19 16 15 14  13   12  5 4   3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none, 01/10=reserved, 11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger power-of-2 regions
 * you need to ensure you mask the address as required and set BAS=0xff.
 */

static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }

    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
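
        /*
         * For example, a 2-byte watch at addr 0x1003 gives wvr 0x1000,
         * off 3 and bas 0b11; the deposit sets BAS bits [9:8], so the
         * hardware watches bytes 3 and 4 of the aligned doubleword.
         */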
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
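        /*
         * e.g. for a 16-byte region: bits = 4, so MASK = 4 masks the
         * low four address bits and BAS = 0xff covers every byte of
         * each doubleword within the 2^4-byte range.
         */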
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

/**
 * delete_hw_watchpoint()
 * @addr: address of watchpoint
 *
 * Delete a watchpoint and shuffle any above down
 */
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;

    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
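
    /*
     * Note the loops below run to max_hw_wps/max_hw_bps rather than
     * the current counts; slots past the current length are expected
     * to read back as zero because both arrays were created with the
     * "clear" flag set in kvm_arm_init_debug().
     */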
    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}
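
/*
 * Probe for a vcpu device attribute and, if it exists, set it.
 * Returns false (after reporting the error) if the attribute is
 * unsupported or the set fails, leaving it to the caller to decide
 * whether that is fatal.
 */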
static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}
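
/*
 * Even the AArch32 ID registers are exposed by KVM as 64-bit sysregs
 * (hence the KVM_REG_SIZE_U64 assert below), so a 32-bit read goes
 * through a 64-bit buffer and truncates the result.
 */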
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    uint64_t features = 0;
    uint64_t t;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";
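
    /*
     * The ARM64_SYS_REG arguments below are (op0, op1, CRn, CRm, op2);
     * 3, 0, 0, 4, 0 for instance encodes ID_AA64PFR0_EL1.
     */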
    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }
    }

    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Add feature bits that can't appear until after VCPU init. */
    if (sve_supported) {
        t = ahcf->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        ahcf->isar.id_aa64pfr0 = t;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}

bool kvm_arm_aarch32_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_accel());

    return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_accel());

    return kvm_check_extension(s, KVM_CAP_ARM_SVE);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i, j;

    bitmap_clear(map, 0, ARM_MAX_VQ);

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }
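
        /*
         * KVM_REG_ARM64_SVE_VLS is a bitmap: bit (vq - 1) set means a
         * vector length of vq * 128 bits is supported. Scan from the
         * top word down to find the maximum vq.
         */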
        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
        }
    }

    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
        if (!vls[i]) {
            continue;
        }
        for (j = 1; j <= 64; ++j) {
            vq = j + i * 64;
            if (vq > ARM_MAX_VQ) {
                return;
            }
            if (vls[i] & (1UL << (j - 1))) {
                set_bit(vq - 1, map);
            }
        }
    }
}

static int kvm_arm_sve_set_vls(CPUState *cs)
{
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };
    ARMCPU *cpu = ARM_CPU(cs);
    uint32_t vq;
    int i, j;

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            i = (vq - 1) / 64;
            j = (vq - 1) % 64;
            vls[i] |= 1UL << j;
        }
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

#define ARM_CPU_ID_MPIDR     3, 0, 0, 0, 5
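/* i.e. MPIDR_EL1: op0=3, op1=0, CRn=0, CRm=0, op2=5 (S3_0_C0_C0_5) */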

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported(cs));
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }
    return KVM_PUT_RUNTIME_STATE;
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
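
/*
 * KVM_REG_ARM_CORE_REG(x) is offsetof(struct kvm_regs, x) divided by
 * sizeof(__u32), so e.g. AARCH64_CORE_REG(regs.pc) names the PC slot
 * of the kernel's struct kvm_regs.
 */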

static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
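    /*
     * i.e. (assuming the usual Linux KVM_SPSR_* numbering) SPSR_EL1/svc
     * lands in banked_spsr[1], abt in [2], und in [3], irq in [4] and
     * fiq in [5].
     */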
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;
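/* 0xd4200000 is BRK #0: opcode 11010100_001 with a zero imm16 in bits [20:5] */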

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    cc->do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}