target/arm/kvm: Implement virtual time adjustment
[qemu/ar7.git] target/arm/kvm32.c (blob 3a8b437eef0bbd4b2ba215eaad4fc2417f6a12be)

/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "qemu/log.h"
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

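/* Read a 32-bit ID/system register from a vCPU fd via KVM_GET_ONE_REG. */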
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int err = 0, fdarray[3];
    uint32_t midr, id_pfr0;
    uint64_t features = 0;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcf->dtb_compatible = "arm,arm-v7";

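    /*
     * fdarray[] holds the kvm, VM and vCPU file descriptors from the
     * scratch VM; the ID registers below are read from the vCPU fd.
     */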
    err |= read_sys_reg32(fdarray[2], &midr, ARM_CP15_REG32(0, 0, 0, 0));
    err |= read_sys_reg32(fdarray[2], &id_pfr0, ARM_CP15_REG32(0, 0, 1, 0));

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                          ARM_CP15_REG32(0, 0, 2, 0));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                          ARM_CP15_REG32(0, 0, 2, 1));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                          ARM_CP15_REG32(0, 0, 2, 2));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                          ARM_CP15_REG32(0, 0, 2, 3));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                          ARM_CP15_REG32(0, 0, 2, 4));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                          ARM_CP15_REG32(0, 0, 2, 5));
    if (read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                       ARM_CP15_REG32(0, 0, 2, 7))) {
        /*
         * Older kernels don't support reading ID_ISAR6. This register was
         * only introduced in ARMv8, so we can assume that it is zero on a
         * CPU that a kernel this old is running on.
         */
        ahcf->isar.id_isar6 = 0;
    }

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR0);
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1);
    /*
     * FIXME: There is not yet a way to read MVFR2.
     * Fortunately there is not yet anything in there that affects migration.
     */

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM supporting CPU is at least a v7
     * with VFPv3, virtualization extensions, and the generic
     * timers; this in turn implies most of the other feature
     * bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7VE);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(ahcf->isar.mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(ahcf->isar.mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcf->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All coprocessor registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
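    /*
     * The virtual counter is only written back on a full state sync
     * (e.g. after loading saved state), so routine register syncs do
     * not disturb the guest's view of time.
     */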
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

#define ARM_CPU_ID_MPIDR 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

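    /*
     * The VM state change handler lets the common ARM KVM code save the
     * virtual counter when the VM is stopped and restore it on resume,
     * so guest virtual time does not advance while the VM is paused.
     */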
    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

    /* Check whether userspace can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
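/* (offsetoflow32() picks out the offset of the low 32 bits of the field.) */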
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }

static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
    COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[BANK_SVC]),
    COREREG(svc_regs[1], banked_r14[BANK_SVC]),
    COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
    COREREG(abt_regs[0], banked_r13[BANK_ABT]),
    COREREG(abt_regs[1], banked_r14[BANK_ABT]),
    COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
    COREREG(und_regs[0], banked_r13[BANK_UND]),
    COREREG(und_regs[1], banked_r14[BANK_UND]),
    COREREG64(und_regs[2], banked_spsr[BANK_UND]),
    COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
    COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
    COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
    COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
    COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
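    /*
     * In FIQ mode the live r8-r12 belong to the FIQ bank; in all other
     * modes they belong to the usr/sys bank.
     */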
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_spsr[bn] = env->spsr;
    env->banked_r14[r14_bank_number(mode)] = env->regs[14];

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
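    /* d0..d31 have consecutive register IDs, so r.id is just incremented. */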
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
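    /*
     * Use a "raw" CPSR write: the usual mode-switch side effects are not
     * wanted here, as the banked registers for the new mode are set up
     * by hand just below.
     */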
    cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->spsr = env->banked_spsr[bn];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}

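/*
 * Guest debug, hardware breakpoints and the PMU are not wired up for
 * 32-bit KVM hosts; the stubs below just log that they are unimplemented.
 */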
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return false;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return false;
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_pmu_init(CPUState *cs)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}