target-arm: kvm64 sync FP register state
[qemu/qmp-unstable.git] / target-arm / kvm64.c
blobd6c83b0fb2cd4c4f8a96ad63f4fa5a3056ed209c
1 /*
2 * ARM implementation of KVM hooks, 64 bit specific code
4 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
9 */
11 #include <stdio.h>
12 #include <sys/types.h>
13 #include <sys/ioctl.h>
14 #include <sys/mman.h>
16 #include <linux/kvm.h>
18 #include "config-host.h"
19 #include "qemu-common.h"
20 #include "qemu/timer.h"
21 #include "sysemu/sysemu.h"
22 #include "sysemu/kvm.h"
23 #include "kvm_arm.h"
24 #include "cpu.h"
25 #include "internals.h"
26 #include "hw/arm/arm.h"
/* Set a single feature bit in the 64-bit feature bitmap.
 *
 * @features: pointer to the feature bitmap to update
 * @feature:  bit number (an ARM_FEATURE_* enum value) to set; must be 0..63
 */
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}
33 bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
35 /* Identify the feature bits corresponding to the host CPU, and
36 * fill out the ARMHostCPUClass fields accordingly. To do this
37 * we have to create a scratch VM, create a single CPU inside it,
38 * and then query that CPU for the relevant ID registers.
39 * For AArch64 we currently don't care about ID registers at
40 * all; we just want to know the CPU type.
42 int fdarray[3];
43 uint64_t features = 0;
44 /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
45 * we know these will only support creating one kind of guest CPU,
46 * which is its preferred CPU type. Fortunately these old kernels
47 * support only a very limited number of CPUs.
49 static const uint32_t cpus_to_try[] = {
50 KVM_ARM_TARGET_AEM_V8,
51 KVM_ARM_TARGET_FOUNDATION_V8,
52 KVM_ARM_TARGET_CORTEX_A57,
53 QEMU_KVM_ARM_TARGET_NONE
55 struct kvm_vcpu_init init;
57 if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
58 return false;
61 ahcc->target = init.target;
62 ahcc->dtb_compatible = "arm,arm-v8";
64 kvm_arm_destroy_scratch_host_vcpu(fdarray);
66 /* We can assume any KVM supporting CPU is at least a v8
67 * with VFPv4+Neon; this in turn implies most of the other
68 * feature bits.
70 set_feature(&features, ARM_FEATURE_V8);
71 set_feature(&features, ARM_FEATURE_VFP4);
72 set_feature(&features, ARM_FEATURE_NEON);
73 set_feature(&features, ARM_FEATURE_AARCH64);
75 ahcc->features = features;
77 return true;
80 int kvm_arch_init_vcpu(CPUState *cs)
82 int ret;
83 ARMCPU *cpu = ARM_CPU(cs);
85 if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
86 !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
87 fprintf(stderr, "KVM is not supported for this guest CPU type\n");
88 return -EINVAL;
91 /* Determine init features for this CPU */
92 memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
93 if (cpu->start_powered_off) {
94 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
96 if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
97 cpu->psci_version = 2;
98 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
100 if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
101 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
104 /* Do KVM_ARM_VCPU_INIT ioctl */
105 ret = kvm_arm_vcpu_init(cs);
106 if (ret) {
107 return ret;
110 return kvm_arm_init_cpreg_list(cpu);
113 bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
115 /* Return true if the regidx is a register we should synchronize
116 * via the cpreg_tuples array (ie is not a core reg we sync by
117 * hand in kvm_arch_get/put_registers())
119 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
120 case KVM_REG_ARM_CORE:
121 return false;
122 default:
123 return true;
/* Build a KVM_GET/SET_ONE_REG id for a 64-bit member of struct kvm_regs,
 * given the member expression (e.g. regs.pc).
 */
#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* As above but for the 128-bit SIMD/FP vector registers (fp_regs.vregs[n]). */
#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* As above but for the 32-bit FP control/status registers (fpsr/fpcr). */
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
136 int kvm_arch_put_registers(CPUState *cs, int level)
138 struct kvm_one_reg reg;
139 uint32_t fpr;
140 uint64_t val;
141 int i;
142 int ret;
144 ARMCPU *cpu = ARM_CPU(cs);
145 CPUARMState *env = &cpu->env;
147 /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
148 * AArch64 registers before pushing them out to 64-bit KVM.
150 if (!is_a64(env)) {
151 aarch64_sync_32_to_64(env);
154 for (i = 0; i < 31; i++) {
155 reg.id = AARCH64_CORE_REG(regs.regs[i]);
156 reg.addr = (uintptr_t) &env->xregs[i];
157 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
158 if (ret) {
159 return ret;
163 /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
164 * QEMU side we keep the current SP in xregs[31] as well.
166 aarch64_save_sp(env, 1);
168 reg.id = AARCH64_CORE_REG(regs.sp);
169 reg.addr = (uintptr_t) &env->sp_el[0];
170 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
171 if (ret) {
172 return ret;
175 reg.id = AARCH64_CORE_REG(sp_el1);
176 reg.addr = (uintptr_t) &env->sp_el[1];
177 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
178 if (ret) {
179 return ret;
182 /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
183 if (is_a64(env)) {
184 val = pstate_read(env);
185 } else {
186 val = cpsr_read(env);
188 reg.id = AARCH64_CORE_REG(regs.pstate);
189 reg.addr = (uintptr_t) &val;
190 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
191 if (ret) {
192 return ret;
195 reg.id = AARCH64_CORE_REG(regs.pc);
196 reg.addr = (uintptr_t) &env->pc;
197 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
198 if (ret) {
199 return ret;
202 reg.id = AARCH64_CORE_REG(elr_el1);
203 reg.addr = (uintptr_t) &env->elr_el[1];
204 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
205 if (ret) {
206 return ret;
209 for (i = 0; i < KVM_NR_SPSR; i++) {
210 reg.id = AARCH64_CORE_REG(spsr[i]);
211 reg.addr = (uintptr_t) &env->banked_spsr[i - 1];
212 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
213 if (ret) {
214 return ret;
218 /* Advanced SIMD and FP registers
219 * We map Qn = regs[2n+1]:regs[2n]
221 for (i = 0; i < 32; i++) {
222 int rd = i << 1;
223 uint64_t fp_val[2];
224 #ifdef HOST_WORDS_BIGENDIAN
225 fp_val[0] = env->vfp.regs[rd + 1];
226 fp_val[1] = env->vfp.regs[rd];
227 #else
228 fp_val[1] = env->vfp.regs[rd + 1];
229 fp_val[0] = env->vfp.regs[rd];
230 #endif
231 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
232 reg.addr = (uintptr_t)(&fp_val);
233 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
234 if (ret) {
235 return ret;
239 reg.addr = (uintptr_t)(&fpr);
240 fpr = vfp_get_fpsr(env);
241 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
242 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
243 if (ret) {
244 return ret;
247 fpr = vfp_get_fpcr(env);
248 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
249 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
250 if (ret) {
251 return ret;
254 if (!write_list_to_kvmstate(cpu)) {
255 return EINVAL;
258 kvm_arm_sync_mpstate_to_kvm(cpu);
260 return ret;
263 int kvm_arch_get_registers(CPUState *cs)
265 struct kvm_one_reg reg;
266 uint64_t val;
267 uint32_t fpr;
268 int i;
269 int ret;
271 ARMCPU *cpu = ARM_CPU(cs);
272 CPUARMState *env = &cpu->env;
274 for (i = 0; i < 31; i++) {
275 reg.id = AARCH64_CORE_REG(regs.regs[i]);
276 reg.addr = (uintptr_t) &env->xregs[i];
277 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
278 if (ret) {
279 return ret;
283 reg.id = AARCH64_CORE_REG(regs.sp);
284 reg.addr = (uintptr_t) &env->sp_el[0];
285 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
286 if (ret) {
287 return ret;
290 reg.id = AARCH64_CORE_REG(sp_el1);
291 reg.addr = (uintptr_t) &env->sp_el[1];
292 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
293 if (ret) {
294 return ret;
297 reg.id = AARCH64_CORE_REG(regs.pstate);
298 reg.addr = (uintptr_t) &val;
299 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
300 if (ret) {
301 return ret;
304 env->aarch64 = ((val & PSTATE_nRW) == 0);
305 if (is_a64(env)) {
306 pstate_write(env, val);
307 } else {
308 env->uncached_cpsr = val & CPSR_M;
309 cpsr_write(env, val, 0xffffffff);
312 /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
313 * QEMU side we keep the current SP in xregs[31] as well.
315 aarch64_restore_sp(env, 1);
317 reg.id = AARCH64_CORE_REG(regs.pc);
318 reg.addr = (uintptr_t) &env->pc;
319 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
320 if (ret) {
321 return ret;
324 /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
325 * incoming AArch64 regs received from 64-bit KVM.
326 * We must perform this after all of the registers have been acquired from
327 * the kernel.
329 if (!is_a64(env)) {
330 aarch64_sync_64_to_32(env);
333 reg.id = AARCH64_CORE_REG(elr_el1);
334 reg.addr = (uintptr_t) &env->elr_el[1];
335 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
336 if (ret) {
337 return ret;
340 for (i = 0; i < KVM_NR_SPSR; i++) {
341 reg.id = AARCH64_CORE_REG(spsr[i]);
342 reg.addr = (uintptr_t) &env->banked_spsr[i - 1];
343 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
344 if (ret) {
345 return ret;
349 /* Advanced SIMD and FP registers
350 * We map Qn = regs[2n+1]:regs[2n]
352 for (i = 0; i < 32; i++) {
353 uint64_t fp_val[2];
354 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
355 reg.addr = (uintptr_t)(&fp_val);
356 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
357 if (ret) {
358 return ret;
359 } else {
360 int rd = i << 1;
361 #ifdef HOST_WORDS_BIGENDIAN
362 env->vfp.regs[rd + 1] = fp_val[0];
363 env->vfp.regs[rd] = fp_val[1];
364 #else
365 env->vfp.regs[rd + 1] = fp_val[1];
366 env->vfp.regs[rd] = fp_val[0];
367 #endif
371 reg.addr = (uintptr_t)(&fpr);
372 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
373 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
374 if (ret) {
375 return ret;
377 vfp_set_fpsr(env, fpr);
379 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
380 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
381 if (ret) {
382 return ret;
384 vfp_set_fpcr(env, fpr);
386 if (!write_kvmstate_to_list(cpu)) {
387 return EINVAL;
389 /* Note that it's OK to have registers which aren't in CPUState,
390 * so we can ignore a failure return here.
392 write_list_to_cpustate(cpu);
394 kvm_arm_sync_mpstate_to_qemu(cpu);
396 /* TODO: other registers */
397 return ret;