/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}
bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}
#define ARM_CPU_ID_MPIDR      3, 0, 0, 0, 5
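/* ARM_CPU_ID_MPIDR is the (op0, op1, CRn, CRm, op2) encoding of MPIDR_EL1;
 * ARM64_SYS_REG() packs these fields into a KVM register ID so the register
 * can be read with kvm_get_one_reg() in kvm_arch_init_vcpu() below.
 */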
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    return kvm_arm_init_cpreg_list(cpu);
}
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
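/* KVM_REG_ARM_TIMER_CNT is the virtual counter: it is only written back at
 * KVM_PUT_FULL_STATE (e.g. on an incoming migration), because writing the
 * counter on every runtime sync would make the guest's view of time jump.
 */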
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
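/* In the macros above, KVM_REG_ARM_CORE_REG(x) is the offset of field x
 * within the kernel's struct kvm_regs, expressed in 32-bit words, so core
 * registers are identified by the struct kvm_regs member that holds them
 * (e.g. regs.pc, fp_regs.vregs[n]).
 */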
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu, level)) {
        return EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        env->uncached_cpsr = val & CPSR_M;
        cpsr_write(env, val, 0xffffffff);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}