/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "hw/boards.h"
#include "internals.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 *
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */
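/*
 * As a concrete illustration of the layout above: the unlinked
 * breakpoints created below always program DBGBCR<n>_EL1 with E=1
 * (bit 0), PMC=0b11 (bits 2:1) and BAS=0xf (bits 8:5), i.e.
 * 0x1 | (0x3 << 1) | (0xf << 5) == 0x1e7.
 */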
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);    /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);    /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;

    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of access
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31  29 28   24 23  21  20  19 16 15 14  13   12  5 4    3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (which
 * must be a power of 2) you need to ensure you mask the address as
 * required and set BAS=0xff.
 */
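/*
 * Worked example (illustrative values): a 2-byte write watchpoint at
 * 0x1006 stores WVR=0x1000 (the doubleword base) and builds WCR with
 * E=1, PAC=0b11, LSC=0b10 and BAS=0b11 shifted up by the byte offset
 * of 6, i.e. 0x1 | (0x3 << 1) | (0x2 << 3) | (0x3 << 11) == 0x1817.
 */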
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }

    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}
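/*
 * For example (illustrative values): a 16-byte watchpoint is stored
 * with MASK=4 and BAS=0xff, so the range check above computes
 * addr_top = addr_bottom + (1 << 4) and matches all 16 bytes.
 */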
/**
 * delete_hw_watchpoint()
 * @addr: address of watch point
 *
 * Delete a watchpoint and shuffle any above down
 */
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;

    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

static inline void unset_feature(uint64_t *features, int feature)
{
    *features &= ~(1ULL << feature);
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    /* KVM transfers sysreg values as 64-bit quantities (see the assert) */
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);

    ahcf->features = features;

    return true;
}

bool kvm_arm_aarch32_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return kvm_check_extension(s, KVM_CAP_ARM_SVE);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i, j;

    bitmap_clear(map, 0, ARM_MAX_VQ);

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
        }
    }

    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
        if (!vls[i]) {
            continue;
        }
        for (j = 1; j <= 64; ++j) {
            vq = j + i * 64;
            if (vq > ARM_MAX_VQ) {
                return;
            }
            if (vls[i] & (1UL << (j - 1))) {
                set_bit(vq - 1, map);
            }
        }
    }
}
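/*
 * For example (illustrative value): vls[0] == 0b1011 means the host
 * supports vector lengths of 1, 2 and 4 quadwords (128, 256 and
 * 512-bit vectors), so bits 0, 1 and 3 end up set in @map.
 */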
static int kvm_arm_sve_set_vls(CPUState *cs)
{
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };
    ARMCPU *cpu = ARM_CPU(cs);
    uint32_t vq;
    int i;

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            i = (vq - 1) / 64;
            vls[i] |= 1UL << ((vq - 1) % 64);
        }
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        unset_feature(&env->features, ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported(cs));
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
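/*
 * A hypothetical additional entry, for illustration only: deferring the
 * timer compare value to full state writes as well would look like
 *     { KVM_REG_ARM_TIMER_CVAL, KVM_PUT_FULL_STATE },
 */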
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * SVE registers are encoded in KVM's memory in an endianness-invariant format.
 * The byte at offset i from the start of the in-memory representation contains
 * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
 * lowest offsets are stored in the lowest memory addresses, then that nearly
 * matches QEMU's representation, which is to use an array of host-endian
 * uint64_t's, where the lower offsets are at the lower indices. To complete
 * the translation we just need to byte swap the uint64_t's on big-endian hosts.
 */
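/*
 * For example (illustrative value): KVM stores the 64-bit chunk
 * 0x0123456789abcdef as the byte sequence ef cd ab 89 67 45 23 01.
 * Read as a big-endian uint64_t that is 0xefcdab8967452301, so one
 * bswap64() per chunk recovers the intended value.
 */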
static uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
{
#ifdef HOST_WORDS_BIGENDIAN
    int i;

    for (i = 0; i < nr; ++i) {
        dst[i] = bswap64(src[i]);
    }

    return dst;
#else
    return src;
#endif
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
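/*
 * (With ARM_MAX_VQ capped at 16 a Z register is at most 16 * 128 ==
 * 2048 bits and a predicate at most 256 bits, so slice 0 is always
 * sufficient here.)
 */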
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
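    /*
     * (KVM's spsr[] order is SVC, ABT, UND, IRQ, FIQ, which is why
     * spsr[i] pairs with banked_spsr[i + 1]: QEMU's bank 0 is USR.)
     */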
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;
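/*
 * (BRK #imm16 is encoded as 0xd4200000 | imm16 << 5, so this is a
 * plain "brk #0".)
 */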
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */
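/*
 * (syn_get_ec() extracts the exception class from ESR bits [31:26];
 * a guest BRK, for instance, arrives here as EC_AA64_BKPT.)
 */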
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    cc->do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}