/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "hw/boards.h"
#include "internals.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface.
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */
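
/*
 * For example, sextract64(addr, 0, 53) copies bit 52 into bits [63:53],
 * so a kernel address such as 0xffff800010000000 (bit 52 set) keeps its
 * high bits all-ones while a user address (bit 52 clear) keeps them
 * zero; either way the stored value round-trips for PC comparisons.
 */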

static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */
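
    /*
     * At this point brk.bcr reads 0x1e7: E=1, PMC=0b11 and BAS=0b1111,
     * i.e. an enabled, unlinked breakpoint matching EL1/EL0 execution
     * of the whole A64 instruction.
     */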

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;

    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of access
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31  29 28   24 23  21  20  19 16 15 14  13   12  5 4   3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (^2) you
 * need to ensure you mask the address as required and set BAS=0xff.
 */

static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }

    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
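
        /*
         * For example a 2 byte watchpoint on 0x1002 gives off = 2,
         * bas = 0b11 and a BAS field of 0b00001100, selecting just the
         * two watched bytes of the 8 byte DBGWVR-aligned window.
         */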
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
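
            /*
             * For example len = 16 gives bits = 4: DBGWVR is aligned
             * down to a 16 byte boundary, MASK = 4 ignores the low
             * four address bits and BAS selects all eight bytes.
             */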
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

/**
 * delete_hw_watchpoint()
 * @addr: address of watch point
 *
 * Delete a watchpoint and shuffle any above down
 */
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;

    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

static inline void unset_feature(uint64_t *features, int feature)
{
    *features &= ~(1ULL << feature);
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);

    ahcf->features = features;

    return true;
}

bool kvm_arm_aarch32_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(CPUState *cpu)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return kvm_check_extension(s, KVM_CAP_ARM_SVE);
}

#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }
628 memset(cpu
->kvm_init_features
, 0, sizeof(cpu
->kvm_init_features
));
629 if (cpu
->start_powered_off
) {
630 cpu
->kvm_init_features
[0] |= 1 << KVM_ARM_VCPU_POWER_OFF
;
632 if (kvm_check_extension(cs
->kvm_state
, KVM_CAP_ARM_PSCI_0_2
)) {
633 cpu
->psci_version
= 2;
634 cpu
->kvm_init_features
[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2
;
636 if (!arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)) {
637 cpu
->kvm_init_features
[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT
;
639 if (!kvm_check_extension(cs
->kvm_state
, KVM_CAP_ARM_PMU_V3
)) {
640 cpu
->has_pmu
= false;
643 cpu
->kvm_init_features
[0] |= 1 << KVM_ARM_VCPU_PMU_V3
;
645 unset_feature(&env
->features
, ARM_FEATURE_PMU
);
647 if (cpu_isar_feature(aa64_sve
, cpu
)) {
648 assert(kvm_arm_sve_supported(cs
));
649 cpu
->kvm_init_features
[0] |= 1 << KVM_ARM_VCPU_SVE
;
652 /* Do KVM_ARM_VCPU_INIT ioctl */
653 ret
= kvm_arm_vcpu_init(cs
);
658 if (cpu_isar_feature(aa64_sve
, cpu
)) {
659 ret
= kvm_arm_vcpu_finalize(cs
, KVM_ARM_VCPU_SVE
);

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
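
/*
 * For example, a hypothetical entry such as
 *   { KVM_REG_ARM_TIMER_CVAL, KVM_PUT_FULL_STATE },
 * would stop the timer compare value being rewritten on every runtime
 * state sync as well (illustrative only, not part of the table above).
 */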

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }
    return KVM_PUT_RUNTIME_STATE;
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
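
/*
 * KVM_REG_ARM_CORE_REG(x) is the offset of field x within the kernel's
 * struct kvm_regs, in 32-bit words; for example AARCH64_CORE_REG(regs.pc)
 * names the 64-bit core register backed by kvm_regs.regs.pc.
 */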

static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * SVE registers are encoded in KVM's memory in an endianness-invariant format.
 * The byte at offset i from the start of the in-memory representation contains
 * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
 * lowest offsets are stored in the lowest memory addresses, then that nearly
 * matches QEMU's representation, which is to use an array of host-endian
 * uint64_t's, where the lower offsets are at the lower indices. To complete
 * the translation we just need to byte swap the uint64_t's on big-endian hosts.
 */
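
/*
 * For example the 128-bit value 0x00112233445566778899aabbccddeeff is
 * stored by KVM as the byte sequence ff ee dd cc ... 11 00. A
 * little-endian host's uint64_t array already has exactly that layout;
 * a big-endian host must pass each element through bswap64().
 */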
static uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
{
#ifdef HOST_WORDS_BIGENDIAN
    int i;

    for (i = 0; i < nr; ++i) {
        dst[i] = bswap64(src[i]);
    }
    return dst;
#else
    return src;
#endif
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
= 0; i
< 31; i
++) {
1057 reg
.id
= AARCH64_CORE_REG(regs
.regs
[i
]);
1058 reg
.addr
= (uintptr_t) &env
->xregs
[i
];
1059 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, ®
);
1065 reg
.id
= AARCH64_CORE_REG(regs
.sp
);
1066 reg
.addr
= (uintptr_t) &env
->sp_el
[0];
1067 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, ®
);
1072 reg
.id
= AARCH64_CORE_REG(sp_el1
);
1073 reg
.addr
= (uintptr_t) &env
->sp_el
[1];
1074 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, ®
);
1079 reg
.id
= AARCH64_CORE_REG(regs
.pstate
);
1080 reg
.addr
= (uintptr_t) &val
;
1081 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, ®
);
1086 env
->aarch64
= ((val
& PSTATE_nRW
) == 0);
1088 pstate_write(env
, val
);
1090 cpsr_write(env
, val
, 0xffffffff, CPSRWriteRaw
);
1093 /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
1094 * QEMU side we keep the current SP in xregs[31] as well.
1096 aarch64_restore_sp(env
, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;
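
/*
 * 0xd4200000 is the A64 encoding of BRK #0: opcode bits [31:21] are
 * 0b11010100001 and the 16-bit immediate in bits [20:5] is zero.
 */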

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */
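
/*
 * The exception class lives in ESR_ELx[31:26] and syn_get_ec() is just
 * that extraction: for example a syndrome of 0x92000046 has EC 0x24, a
 * data abort from a lower exception level.
 */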
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    cc->do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}