/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

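    /* Note: kvm_arm_create_scratch_host_vcpu() fills fdarray[] with the KVM,
     * VM and VCPU file descriptors; the ID register reads below are issued
     * against the scratch VCPU fd (fdarray[2]).
     */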
    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);
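
    /* The remaining features vary between v7 implementations, so decode the
     * relevant ID register fields: ID_ISAR0.Divide describes which divide
     * instructions are present, ID_PFR0 indicates ThumbEE support, and MVFR1
     * describes the FP/Advanced SIMD capabilities (half-precision, NEON and
     * fused MAC, the last of which implies VFPv4).
     */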
    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}

static bool reg_syncs_via_tuple_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    int i, ret, arraylen;
    uint64_t v;
    struct kvm_one_reg r;
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /* Populate the cpreg list based on the kernel's idea
     * of what registers exist (and throw away the TCG-created list).
     */
    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
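
    /* The first KVM_GET_REG_LIST call above is made with n == 0: it fails
     * with E2BIG but reports how many registers the kernel exposes, so we
     * can size the buffer for the second, real call below.
     */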
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
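
    /* Two passes over the kernel's list: the first just counts the registers
     * we will sync via the cpreg tuple arrays (and sanity-checks their sizes)
     * so the arrays can be allocated; the second fills in the indexes.
     */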
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }
    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!reg_syncs_via_tuple_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

    /* Save a copy of the initial register values so that we can
     * feed it back to the kernel on VCPU reset.
     */
    cpu->cpreg_reset_values = g_memdup(cpu->cpreg_values,
                                       cpu->cpreg_array_len *
                                       sizeof(cpu->cpreg_values[0]));

    ret = 0;
out:
    g_free(rlp);
    return ret;
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }
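
/* Each entry in the regs[] table below pairs a KVM "core" register ID with
 * the offset of the corresponding field in CPUARMState, so that
 * kvm_arch_put_registers() and kvm_arch_get_registers() can transfer all of
 * them with a single generic loop.
 */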
static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[0]),
    COREREG(usr_regs.uregs[14], banked_r14[0]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[1]),
    COREREG(svc_regs[1], banked_r14[1]),
    COREREG64(svc_regs[2], banked_spsr[1]),
    COREREG(abt_regs[0], banked_r13[2]),
    COREREG(abt_regs[1], banked_r14[2]),
    COREREG64(abt_regs[2], banked_spsr[2]),
    COREREG(und_regs[0], banked_r13[3]),
    COREREG(und_regs[1], banked_r14[3]),
    COREREG64(und_regs[2], banked_spsr[3]),
    COREREG(irq_regs[0], banked_r13[4]),
    COREREG(irq_regs[1], banked_r14[4]),
    COREREG64(irq_regs[2], banked_spsr[4]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[5]),
    COREREG(fiq_regs[6], banked_r14[5]),
    COREREG64(fiq_regs[7], banked_spsr[5]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
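
    /* VFP registers: the 32 double-precision regs d0..d31 are transferred
     * one at a time. The low bits of the register ID select the register,
     * so the ID is simply incremented on each pass through the loop.
     */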
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu)) {
        return EINVAL;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];
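
    /* VFP registers: read back d0..d31 the same way they were written,
     * incrementing the register ID for each double-precision register.
     */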
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    return 0;
}

void kvm_arm_reset_vcpu(ARMCPU *cpu)
{
    /* Feed the kernel back its initial register state */
    memmove(cpu->cpreg_values, cpu->cpreg_reset_values,
            cpu->cpreg_array_len * sizeof(cpu->cpreg_values[0]));

    if (!write_list_to_kvmstate(cpu)) {
        abort();
    }
}