/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "hw/arm/virt.h"
static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;
/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;
/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}
/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);    /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);    /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}
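
/*
 * Worked example (illustrative): for any address the code above always
 * produces BCR = 0x1e7, i.e. E=1 (bit 0), PMC=0b11 (bits 2:1) and
 * BAS=0xf (bits 8:5): an unlinked EL1/EL0 address-match breakpoint
 * covering the whole instruction word.
 */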
/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}
/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of watch point
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31  29 28   24 23  21  20  19 16 15 14  13   12  5 4   3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (^2) you
 * need to ensure you mask the address as required and set BAS=0xff
 */
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = R_DBGWCR_E_MASK, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, PAC, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }

    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, MASK, bits);
            wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, BAS, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);

    return 0;
}
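
/*
 * Worked example (illustrative): a 2-byte watchpoint at addr 0x1003
 * gives off = 3 and bas = 0b11, so the deposit32() above sets BAS
 * bits 4:3, i.e. WCR[BAS] = 0x18: bytes 3 and 4 of the aligned
 * doubleword at 0x1000 are watched.
 */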
static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}
/**
 * delete_hw_watchpoint()
 * @addr: address of watchpoint
 *
 * Delete a watchpoint and shuffle any above down
 */
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}
void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}
void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}
bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}
static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}
static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
                                    const char *name)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    return true;
}
void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to init PMU");
        abort();
    }
}
void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to set irq for PMU");
        abort();
    }
}
void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&ipa,
    };

    if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
        error_report("failed to init PVTIME IPA");
        abort();
    }
}
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    /* The kernel exposes these registers in 64-bit slots, so read into
     * a 64-bit temporary and narrow the result.
     */
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}
static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}
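
/*
 * Usage example (illustrative): MIDR_EL1 is encoded as op0=3, op1=0,
 * CRn=0, CRm=0, op2=0, so it could be fetched with
 *     read_sys_reg32(fd, &val, ARM64_SYS_REG(3, 0, 0, 0, 0));
 */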
static bool kvm_arm_pauth_supported(void)
{
    return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
            kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}
bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    bool pmu_supported = false;
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    /*
     * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
     * which is otherwise RAZ.
     */
    sve_supported = kvm_arm_sve_supported();
    if (sve_supported) {
        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /*
     * Ask for Pointer Authentication if supported, so that we get
     * the unsanitized field values for AA64ISAR1_EL1.
     */
    if (kvm_arm_pauth_supported()) {
        init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                             1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    if (kvm_arm_pmu_supported()) {
        init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
        pmu_supported = true;
    }

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }
    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 5));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));
        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }
        if (pmu_supported) {
            /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                  ARM64_SYS_REG(3, 3, 9, 12, 0));
        }
        if (sve_supported) {
            /*
             * There is a range of kernels between kernel commit 73433762fcae
             * and f81cb2c3ad41 which have a bug where the kernel doesn't
             * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
             * enabled SVE support, which resulted in an error rather than RAZ.
             * So only read the register if we set KVM_ARM_VCPU_SVE above.
             */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);
    if (err < 0) {
        return false;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}
void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    bool has_steal_time = kvm_arm_steal_time_supported();

    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
        } else {
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
        }
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
        if (!has_steal_time) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "on this host");
            return;
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            /*
             * DEN0057A chapter 2 says "This specification only covers
             * systems in which the Execution state of the hypervisor
             * as well as EL1 of virtual machines is AArch64.". And,
             * to ensure that, the smc/hvc calls are only specified as
             * smc64/hvc64.
             */
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "for AArch32 guests");
            return;
        }
    }
}
bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}

bool kvm_arm_steal_time_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(CPUState *cs)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i;

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
            vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
        }
    }

    return vls[0];
}
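
/*
 * Illustrative note: KVM_REG_ARM64_SVE_VLS is a bitmap in which bit
 * (vq - 1) set means the host supports vectors of vq * 128 bits, so a
 * host offering 128- and 256-bit vectors hands back vls[0] = 0x3 and
 * the loop above computes vq = 2.
 */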
static int kvm_arm_sve_set_vls(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
#define ARM_CPU_ID_MPIDR        3, 0, 0, 0, 5
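
/*
 * The tuple above is the (op0, op1, CRn, CRm, op2) encoding of MPIDR_EL1;
 * it is spliced directly into ARM64_SYS_REG(ARM_CPU_ID_MPIDR) below.
 */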
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t psciver;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = QEMU_PSCI_VERSION_0_2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                      1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }
    /*
     * KVM reports the exact PSCI version it is implementing via a
     * special sysreg. If it is present, use its contents to determine
     * what to report to the guest in the dtb (it is the PSCI version,
     * in the same 15-bits major 16-bits minor format that PSCI_VERSION
     * returns).
     */
    if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
        cpu->psci_version = psciver;
    }
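
    /*
     * For example (illustrative): a value of 0x00010001 here reports
     * PSCI v1.1, with the major version in bits [31:16] and the minor
     * version in bits [15:0].
     */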
    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
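
/*
 * For example (hypothetical entry, not in the actual table): a register
 * that only needs syncing on a full state transfer would be listed as
 *     { KVM_REG_ARM_SOME_REG, KVM_PUT_FULL_STATE },
 */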
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}
/* Callers must hold the iothread mutex lock */
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid,
     * this will tell guest the FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
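
/*
 * Note: KVM_REG_ARM_CORE_REG(x) is defined in the kernel's UAPI headers
 * as offsetof(struct kvm_regs, x) / sizeof(__u32), so for example
 * AARCH64_CORE_REG(regs.pc) names the PC slot of struct kvm_regs.
 */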
static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if HOST_BIG_ENDIAN
        /* KVM expects the 128-bit value little-endian, so swap the
         * two 64-bit halves on big-endian hosts. */
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
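/*
 * Illustrative arithmetic: a 2048-bit ZREG slice covers vector lengths
 * up to vq = 16 (16 * 128 bits), which matches ARM_MAX_VQ, so slice 0
 * is sufficient for everything QEMU currently models.
 */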
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}
static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#if HOST_BIG_ENDIAN
            /* Swap the halves back into host order, mirroring the
             * swap done in kvm_arch_put_fpsimd(). */
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}
/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_ghes_present() && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO. In that case we might either be being
             * called synchronously from the vCPU thread, or a bit
             * later from the main thread, so doing the injection of
             * the error would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}
/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;
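
/*
 * Note: 0xd4200000 is BRK #0: the 16-bit immediate lives in bits [20:5]
 * and is zero here; the remaining bits are the fixed BRK opcode pattern.
 */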
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}
/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}
#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

/*
 * ESR_EL1
 * ISS encoding
 * AARCH64: DFSC,   bits [5:0]
 * AARCH32:
 *      TTBCR.EAE == 0
 *          FS[4]   - DFSR[10]
 *          FS[3:0] - DFSR[3:0]
 *      TTBCR.EAE == 1
 *          FS, bits [5:0]
 */
#define ESR_DFSC(aarch64, lpae, v)        \
    ((aarch64 || (lpae)) ? ((v) & 0x3F)   \
               : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
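
/*
 * Worked example (illustrative): a synchronous external abort reads back
 * as DFSC 0x10 in the AArch64 and AArch32-LPAE encodings, but as FS 0x8
 * in the AArch32 short-descriptor encoding, which is exactly the
 * distinction ESR_DFSC_EXTABT() makes.
 */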
bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                        && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification here is based on the DFSC bits
         * of the ESR_EL1 reg only
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}