/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}
bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);
    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}
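/* Descriptive note (assumption about code outside this file): this probe is
 * what backs the "-cpu host" model; the ARM KVM code that registers the host
 * CPU class is expected to copy ahcc->target, ahcc->dtb_compatible and
 * ahcc->features into the CPU objects it instantiates.
 */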
#define ARM_MPIDR_HWID_BITMASK 0xFF00FFFFFFULL
#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5
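/* ARM_CPU_ID_MPIDR spells out the (op0, op1, CRn, CRm, op2) encoding of
 * MPIDR_EL1 for use with ARM64_SYS_REG(); ARM_MPIDR_HWID_BITMASK keeps the
 * Aff3..Aff0 affinity fields and masks off the non-affinity bits.
 */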
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }
    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
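    /* Descriptive note: the bits collected in kvm_init_features[0] above are
     * what the common ARM KVM code passes to the kernel as the features[]
     * array of struct kvm_vcpu_init when it issues KVM_ARM_VCPU_INIT below.
     */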
    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;

    return kvm_arm_init_cpreg_list(cpu);
}
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}
#define AARCH64_CORE_REG(x)      (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
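/* Each of these macros builds a KVM_{GET,SET}_ONE_REG id for one field of the
 * kernel's struct kvm_regs: the architecture flag and transfer size are OR'ed
 * with the "core register" class and an index that the kernel header derives
 * from the field's offset (KVM_REG_ARM_CORE_REG()).  Illustrative sketch of
 * the access pattern used throughout this file, e.g. for the PC:
 *
 *     struct kvm_one_reg r;
 *     uint64_t pc;
 *
 *     r.id = AARCH64_CORE_REG(regs.pc);
 *     r.addr = (uintptr_t)&pc;
 *     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
 */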
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }
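    /* Note that the "level" argument (KVM_PUT_RUNTIME_STATE and friends) is
     * not consulted here: every call pushes the complete register state out
     * to KVM.
     */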
    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
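    /* The five KVM SPSR slots are, in order, the EL1/SVC, ABT, UND, IRQ and
     * FIQ saved program status registers; QEMU keeps the same ordering in
     * banked_spsr[1..5], which is why the loop above offsets the index by 1.
     */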
    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
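    /* Each 128-bit Qn value travels through the two-element fp_val[] buffer:
     * vfp.regs[2n] is the low doubleword of Qn and vfp.regs[2n+1] the high
     * one, and on big-endian hosts the two halves are placed in the buffer
     * in the opposite order before being handed to KVM.
     */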
    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        env->uncached_cpsr = val & CPSR_M;
        cpsr_write(env, val, 0xffffffff);
    }
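    /* PSTATE.nRW (bit 4) is 0 when the vCPU is in AArch64 state and 1 when it
     * is in AArch32 state, which is why env->aarch64 above is derived from it
     * before deciding between pstate_write() and cpsr_write().
     */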
    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);
    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }
    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }
    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];

        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
#ifdef HOST_WORDS_BIGENDIAN
        env->vfp.regs[rd + 1] = fp_val[0];
        env->vfp.regs[rd] = fp_val[1];
#else
        env->vfp.regs[rd + 1] = fp_val[1];
        env->vfp.regs[rd] = fp_val[0];
#endif
    }
    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);
    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}
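/* Descriptive note (assumption about code outside this file): these are the
 * AArch64 halves of the generic KVM register-sync hooks; the common KVM layer
 * calls kvm_arch_get_registers() when QEMU needs an up-to-date CPUState and
 * kvm_arch_put_registers() before re-entering the guest after reset, init or
 * a QEMU-side modification of the state.
 */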