/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}
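
/* System register encoding of MPIDR_EL1 (op0, op1, crn, crm, op2),
 * passed to ARM64_SYS_REG() when reading the vcpu's MPIDR below.
 */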
#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    return kvm_arm_init_cpreg_list(cpu);
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
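    /* KVM_REG_ARM_TIMER_CNT is only written back on a full state write
     * (e.g. an incoming migration), presumably so that runtime syncs do
     * not make the guest's view of the virtual counter jump around.
     */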
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}
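
/* Build KVM_{GET,SET}_ONE_REG ids for the core register state exposed via
 * struct kvm_regs; KVM_REG_ARM_CORE_REG() encodes the offset of the named
 * field within that struct.
 */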
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }
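
    /* General-purpose registers x0..x30 */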
    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
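
    /* Push the system registers tracked in the cpreg list out to KVM;
     * 'level' selects how much of that state is written back (see
     * kvm_arm_cpreg_level()).
     */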
    if (!write_list_to_kvmstate(cpu, level)) {
        return EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
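
    /* PSTATE.nRW is set when the vcpu is in AArch32 state */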
    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        env->uncached_cpsr = val & CPSR_M;
        cpsr_write(env, val, 0xffffffff);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}