/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface.
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);    /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);    /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}
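
/*
 * Illustrative example (not from the original source): a breakpoint at
 * the kernel address 0xffff000010081000 is encoded as bcr = 0x1e7
 * (E=1, PMC=0b11, BAS=0xf); bvr keeps the address unchanged, since bit
 * 52 is already set and the sign extension is a no-op here.
 */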

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;

    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of watch point
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31 29 28   24 23  21  20  19 16 15 14  13   12  5 4   3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (^2) you
 * need to ensure you mask the address as required and set BAS=0xff
 */
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }

    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}
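
/*
 * Illustrative examples (not from the original source): a 2-byte
 * watchpoint at 0x1006 takes the len <= 8 path, giving off = 6 and
 * bas = 0b11, hence BAS = 0b11000000 against the 8-byte-aligned wvr of
 * 0x1000. A 16-byte watchpoint at 0x2000 takes the power-of-2 path,
 * giving bits = 4, so MASK = 4 and BAS = 0xff.
 */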

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}
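
/*
 * For example (illustrative): a watchpoint inserted with MASK = 4 above
 * is treated as covering the 16 bytes from its aligned DBGWVR address,
 * while a BAS-only watchpoint is narrowed to the bytes selected in BAS.
 */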

/**
 * delete_hw_watchpoint()
 * @addr: address of watchpoint
 *
 * Delete a watchpoint and shuffle any above down
 */
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;

    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}
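
/*
 * The registers stashed above are consumed when the generic debug code
 * builds the KVM_SET_GUEST_DEBUG ioctl payload; in the QEMU tree this
 * is expected to happen in kvm_arch_update_guest_debug().
 */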

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

static inline void unset_feature(uint64_t *features, int feature)
{
    *features &= ~(1ULL << feature);
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}
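
/*
 * The AArch32 ID registers are exported by the kernel as 64-bit
 * sysregs, hence the KVM_REG_SIZE_U64 assertion above: the helper
 * reads the full 64 bits and truncates to the 32-bit value the caller
 * wants.
 */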

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    uint64_t features = 0;
    uint64_t t;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";
    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }
    }

    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Add feature bits that can't appear until after VCPU init. */
    if (sve_supported) {
        t = ahcf->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        ahcf->isar.id_aa64pfr0 = t;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    ahcf->features = features;

    return true;
}

bool kvm_arm_aarch32_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_accel());

    return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_accel());

    return kvm_check_extension(s, KVM_CAP_ARM_SVE);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i, j;

    bitmap_clear(map, 0, ARM_MAX_VQ);

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
        }
    }

    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
        if (vls[i]) {
            for (j = 1; j <= 64; ++j) {
                vq = j + i * 64;
                if (vq > ARM_MAX_VQ) {
                    return;
                }
                if (vls[i] & (1UL << (j - 1))) {
                    set_bit(vq - 1, map);
                }
            }
        }
    }
}
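
/*
 * Illustrative example (not from the original source): a host
 * supporting 128-, 256- and 512-bit vectors reports vls[0] = 0b1011
 * (bit vq - 1 set for each supported quadword multiple), so the loop
 * above sets bits 0, 1 and 3 of @map.
 */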

static int kvm_arm_sve_set_vls(CPUState *cs)
{
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };
    ARMCPU *cpu = ARM_CPU(cs);
    uint32_t vq;

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            vls[(vq - 1) / 64] |= 1UL << ((vq - 1) % 64);
        }
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        unset_feature(&env->features, ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported(cs));
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
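
/*
 * KVM_REG_ARM_CORE_REG(x) resolves to offsetof(struct kvm_regs, x) /
 * sizeof(__u32), so e.g. AARCH64_CORE_REG(regs.pc) names the PC slot
 * of struct kvm_regs together with a 64-bit size encoding.
 */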

static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}
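
/*
 * sve_bswap64() is a no-op returning its source pointer on
 * little-endian hosts; on big-endian hosts it byte-swaps each
 * doubleword into the scratch buffer, since KVM expects the
 * little-endian memory format.
 */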

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;
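
/*
 * 0xd4200000 is the encoding of "BRK #0": the 16-bit immediate field
 * in bits [20:5] is left as zero.
 */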

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    cc->do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}