target-arm/kvm.c
/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "hw/arm/arm.h"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
/* Create a scratch VM and VCPU, used only to probe what the host kernel
 * supports. The kvm, vm and vcpu fds are returned in fdarray[] and must be
 * released by the caller with kvm_arm_destroy_scratch_host_vcpu().
 */
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret, kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            init->target = *cpus_to_try++;
            memset(init->features, 0, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
    }

    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}
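
/* A minimal usage sketch for the scratch-VCPU helpers above (illustration
 * only, not part of this file; the cpus_to_try contents and the register
 * probing step are assumptions about how a caller such as
 * kvm_arm_get_host_cpu_features() might use them):
 *
 *     static const uint32_t cpus_to_try[] = {
 *         KVM_ARM_TARGET_CORTEX_A15,
 *         QEMU_KVM_ARM_TARGET_NONE
 *     };
 *     struct kvm_vcpu_init init;
 *     int fdarray[3];
 *
 *     if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
 *         return false;
 *     }
 *     // fdarray[2] is the scratch VCPU fd: the caller would query ID
 *     // registers here (e.g. with KVM_GET_ONE_REG) before tearing down.
 *     kvm_arm_destroy_scratch_host_vcpu(fdarray);
 */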
static void kvm_arm_host_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMHostCPUClass *ahcc = ARM_HOST_CPU_CLASS(oc);

    /* All we really need to set up for the 'host' CPU
     * is the feature bits -- we rely on the fact that the
     * various ID register values in ARMCPU are only used for
     * TCG CPUs.
     */
    if (!kvm_arm_get_host_cpu_features(ahcc)) {
        fprintf(stderr, "Failed to retrieve host CPU features!\n");
        abort();
    }
}

static void kvm_arm_host_cpu_initfn(Object *obj)
{
    ARMHostCPUClass *ahcc = ARM_HOST_CPU_GET_CLASS(obj);
    ARMCPU *cpu = ARM_CPU(obj);
    CPUARMState *env = &cpu->env;

    cpu->kvm_target = ahcc->target;
    cpu->dtb_compatible = ahcc->dtb_compatible;
    env->features = ahcc->features;
}

static const TypeInfo host_arm_cpu_type_info = {
    .name = TYPE_ARM_HOST_CPU,
#ifdef TARGET_AARCH64
    .parent = TYPE_AARCH64_CPU,
#else
    .parent = TYPE_ARM_CPU,
#endif
    .instance_init = kvm_arm_host_cpu_initfn,
    .class_init = kvm_arm_host_cpu_class_init,
    .class_size = sizeof(ARMHostCPUClass),
};
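
/* This "host" CPU type is what a "-cpu host" command line resolves to when
 * QEMU runs under KVM; it is registered from kvm_arch_init() below. A
 * typical invocation might look like the following (machine and other
 * options shown purely for illustration):
 *
 *     qemu-system-arm -enable-kvm -cpu host ...
 */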
int kvm_arch_init(KVMState *s)
{
    /* For ARM interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    type_register_static(&host_arm_cpu_type_info);

    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

/* We track all the KVM devices which need their memory addresses
 * passing to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    struct kvm_device_attr kdattr;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    int dev_fd;
} KVMDevice;

static QSLIST_HEAD(kvm_devices_head, KVMDevice) kvm_devices_head;

static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}

static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}

static MemoryListener devlistener = {
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
};

static void kvm_arm_set_device_addr(KVMDevice *kd)
{
    struct kvm_device_attr *attr = &kd->kdattr;
    int ret;

    /* If the device control API is available and we have a device fd on the
     * KVMDevice struct, let's use the newer API
     */
    if (kd->dev_fd >= 0) {
        uint64_t addr = kd->kda.addr;
        attr->addr = (uintptr_t)&addr;
        ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
    } else {
        ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
    }

    if (ret < 0) {
        fprintf(stderr, "Failed to set device address: %s\n",
                strerror(-ret));
        abort();
    }
}

static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    memory_listener_unregister(&devlistener);
    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            kvm_arm_set_device_addr(kd);
        }
        memory_region_unref(kd->mr);
        g_free(kd);
    }
}

static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};

void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, NULL);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}
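
/* A rough sketch of how a board or device model is expected to call
 * kvm_arm_register_device() (illustration only; the VGIC constants are an
 * assumed example and "s->iomem"/"s->dev_fd" are hypothetical fields):
 *
 *     kvm_arm_register_device(&s->iomem,
 *                             (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
 *                             | KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                             KVM_DEV_ARM_VGIC_GRP_ADDR,
 *                             KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                             s->dev_fd);
 *
 * The region's base address is then passed to the kernel once board init
 * completes, via the machine-init-done notifier above.
 */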
/* Copy the current value of each register listed in cpreg_indexes out of
 * the kernel (KVM_GET_ONE_REG) and into cpreg_values.
 * Returns false if any register could not be read.
 */
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            r.addr = (uintptr_t)&v32;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            break;
        default:
            abort();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}

/* Push the value held in cpreg_values for each register listed in
 * cpreg_indexes into the kernel (KVM_SET_ONE_REG).
 * Returns false if any register could not be written.
 */
bool write_list_to_kvmstate(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;
        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            r.addr = (uintptr_t)&v32;
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            break;
        default:
            abort();
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}
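
/* A sketch of how the two sync helpers above are expected to be used by the
 * per-CPU register accessors (illustration only; the real callers live in
 * the 32-bit/64-bit specific KVM code, not in this file, and the error
 * handling shown is an assumption):
 *
 *     // KVM -> QEMU direction, e.g. when reading registers back
 *     if (!write_kvmstate_to_list(cpu)) {
 *         return -EINVAL;
 *     }
 *
 *     // QEMU -> KVM direction, e.g. when pushing updated state to the kernel
 *     if (!write_list_to_kvmstate(cpu)) {
 *         return -EINVAL;
 *     }
 */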
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    return 0;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs,
                                  struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs,
                                  struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_irqchip_create(KVMState *s)
{
    int ret;

    /* If we can create the VGIC using the newer device control API, we
     * let the device do this when it initializes itself, otherwise we
     * fall back to the old API */

    ret = kvm_create_device(s, KVM_DEV_TYPE_ARM_VGIC_V2, true);
    if (ret == 0) {
        return 1;
    }

    return 0;
}