/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "exec/gdbstub.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/arm/arm.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

/* Value and control registers for a single hardware breakpoint */
typedef struct {
    uint64_t bvr; /* DBGBVR, the address to match */
    uint64_t bcr; /* DBGBCR, the control bits */
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wvr; /* DBGWVR, the base address */
    uint64_t wcr; /* DBGWCR, the control bits */
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface
 *
 * D7.3.2 DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 */
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = addr
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);
    return 0;
}

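/*
 * Worked example: with E, PMC and BAS set as above, every breakpoint
 * is programmed with DBGBCR = 0x1e7 (bit 0, bits 2:1 and bits 8:5 set).
 */
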
/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of watch point
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31  29 28   24 23  21  20  19 16 15 14  13   12  5 4   3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none, 01/10=reserved, 11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (^2) you
 * need to ensure you mask the address as required and set BAS=0xff
 */
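/*
 * For example, a 4-byte watchpoint at 0x1003 is programmed below as
 * WVR = 0x1000 with BAS = 0b01111000 (0xf shifted up by the byte
 * offset of 3), i.e. bytes 3-6 of the aligned doubleword are watched.
 */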
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }

    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1ULL << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

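/**
 * check_watchpoint_in_range()
 * @i: index into the hw_watchpoints array
 * @addr: address to check
 *
 * Recover the byte range covered by watchpoint @i from its BAS/MASK
 * encoding and report whether @addr falls inside it.
 */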
static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

/**
 * delete_hw_watchpoint()
 * @addr: address of watch point
 *
 * Delete a watchpoint and shuffle any above down
 */
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

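/**
 * kvm_arm_copy_hw_debug_data() - copy the cached debug registers
 * @ptr: the kvm_guest_debug_arch structure to fill in
 *
 * Copy the cached break/watch point registers into the structure
 * handed to the kernel when guest debugging is (re-)enabled.
 */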
void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;

    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

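/*
 * This is consulted when the guest debug state is updated, to decide
 * whether hardware assisted debug (KVM_GUESTDBG_USE_HW) needs to be
 * requested from the kernel.
 */
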
static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_pmu_support_ctrl(CPUState *cs, struct kvm_device_attr *attr)
{
    return kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr) == 0;
}

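/*
 * Setting up the in-kernel PMU is a two step process: first the
 * KVM_ARM_VCPU_PMU_V3_IRQ attribute tells the kernel which interrupt
 * line to use, then KVM_ARM_VCPU_PMU_V3_INIT activates the device.
 */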
int kvm_arm_pmu_create(CPUState *cs, int irq)
{
    int err;

    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
        .flags = 0,
    };

    if (!kvm_arm_pmu_support_ctrl(cs, &attr)) {
        return 0;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
    if (err < 0) {
        fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
                strerror(-err));
        abort();
    }

    attr.group = KVM_ARM_VCPU_PMU_V3_CTRL;
    attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
    attr.addr = 0;

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
    if (err < 0) {
        fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
                strerror(-err));
        abort();
    }

    return 1;
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}

#define ARM_CPU_ID_MPIDR    3, 0, 0, 0, 5
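/* The tuple above is (op0, op1, CRn, CRm, op2) for MPIDR_EL1, in the
 * form expected by the ARM64_SYS_REG() macro.
 */
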
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (kvm_irqchip_in_kernel() &&
        kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = true;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
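/* KVM_REG_ARM_TIMER_CNT is only written back on a full state load
 * (such as an incoming migration) so that routine register round-trips
 * do not make the guest's virtual counter jump around.
 */
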
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }
    return KVM_PUT_RUNTIME_STATE;
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
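/*
 * KVM_REG_ARM_CORE_REG() maps a field of struct kvm_regs to its KVM
 * register index, so e.g. AARCH64_CORE_REG(regs.pc) names the guest PC.
 */
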
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;
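/* This is the encoding of "BRK #0": the 16-bit immediate field (bits 20:5) is zero */
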
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")\n",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
958 error_report("%s: unhandled debug exit (%"PRIx32
", %"PRIx64
")\n",
959 __func__
, debug_exit
->hsr
, env
->pc
);
    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    cc->do_interrupt(cs);

    return false;
}