/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}
bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}
#define ARM_MPIDR_HWID_BITMASK 0xFF00FFFFFFULL
#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5
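/* ARM_CPU_ID_MPIDR is the (op0, op1, CRn, CRm, op2) system-register
 * encoding of MPIDR_EL1, in the form consumed by ARM64_SYS_REG() below.
 */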
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;

    return kvm_arm_init_cpreg_list(cpu);
}
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
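/* Keeping KVM_REG_ARM_TIMER_CNT at KVM_PUT_FULL_STATE means the guest's
 * virtual counter is only written back on a full state sync (e.g. after an
 * incoming migration), not on every runtime register sync.
 */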
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }
    return KVM_PUT_RUNTIME_STATE;
}
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
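/* These macros build KVM_{GET,SET}_ONE_REG ids for the "core" register
 * block, i.e. fields of struct kvm_regs: the coproc field is
 * KVM_REG_ARM_CORE and the offset part comes from the field name, so
 * e.g. AARCH64_CORE_REG(regs.pc) names the 64-bit PC slot.
 */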
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }
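    /* Write out the general purpose registers X0..X30. */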
    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
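    /* Each Qn is 128 bits wide; fp_val[] holds its two 64-bit halves,
     * swapped on big-endian hosts so the in-memory layout matches what
     * the kernel expects.
     */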
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }
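    /* Also push the vcpu's power state (running or powered off) to the
     * kernel.
     */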
    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
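    /* Fetch the general purpose registers X0..X30 from KVM. */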
    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
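    /* PSTATE.nRW is clear when the vcpu was running in AArch64 state. */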
    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        env->uncached_cpsr = val & CPSR_M;
        cpsr_write(env, val, 0xffffffff);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);
    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }
    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }
    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }
    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);
    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);
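    /* Also pull the vcpu's power state back from the kernel. */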
    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}