qemu-kvm: Fix GSI handling with in-kernel irqchip
[qemu-kvm.git] / qemu-kvm.c
blob 7e6cbf9699f1bdfa24147c213796a56392b64488
/*
 * qemu/kvm integration
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config.h"
#include "config-host.h"

#include <assert.h>
#include <string.h>
#include "hw/hw.h"
#include "sysemu.h"
#include "qemu-common.h"
#include "console.h"
#include "block.h"
#include "compatfd.h"
#include "gdbstub.h"
#include "monitor.h"
#include "cpus.h"

#include "qemu-kvm.h"
#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
static inline void set_gsi(KVMState *s, unsigned int gsi)
{
    uint32_t *bitmap = s->used_gsi_bitmap;

    if (gsi < s->max_gsi) {
        bitmap[gsi / 32] |= 1U << (gsi % 32);
    } else {
        DPRINTF("Invalid GSI %u\n", gsi);
    }
}
static inline void clear_gsi(KVMState *s, unsigned int gsi)
{
    uint32_t *bitmap = s->used_gsi_bitmap;

    if (gsi < s->max_gsi) {
        bitmap[gsi / 32] &= ~(1U << (gsi % 32));
    } else {
        DPRINTF("Invalid GSI %u\n", gsi);
    }
}
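
/*
 * Illustrative note (not part of the original file): used_gsi_bitmap is an
 * array of 32-bit words with one bit per GSI, and ALIGN() rounds the GSI
 * count up to a multiple of 32 so whole words can later be scanned with
 * ffs().  A minimal sketch of the arithmetic, assuming a hypothetical
 * gsi_count of 24:
 *
 *     gsi_bits = ALIGN(24, 32);               // 32
 *     bitmap   = g_malloc0(gsi_bits / 8);     // one uint32_t word
 *     bitmap[10 / 32] |= 1U << (10 % 32);     // what set_gsi(s, 10) does
 *     bitmap[10 / 32] &= ~(1U << (10 % 32));  // what clear_gsi(s, 10) does
 */
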
static int kvm_init_irq_routing(KVMState *s)
{
#ifdef KVM_CAP_IRQ_ROUTING
    int r, gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    r = kvm_arch_init_irq_routing();
    if (r < 0) {
        return r;
    }
#endif

    return 0;
}
int kvm_create_irqchip(KVMState *s)
{
#ifdef KVM_CAP_IRQCHIP
    int r;

    if (!kvm_irqchip || !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    r = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (r < 0) {
        fprintf(stderr, "Create kernel PIC irqchip failed\n");
        return r;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
#endif
    s->irqchip_in_kernel = 1;

    r = kvm_init_irq_routing(s);
    if (r < 0) {
        return r;
    }
#endif

    return 0;
}
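
/*
 * Usage sketch (illustrative, not part of the original file): machine init
 * is expected to call kvm_create_irqchip() once; it silently does nothing
 * when the user disabled the in-kernel irqchip or the kernel lacks
 * KVM_CAP_IRQCHIP.  Something along these lines, assuming a KVMState *s:
 *
 *     if (kvm_create_irqchip(s) < 0) {
 *         fprintf(stderr, "in-kernel irqchip setup failed\n");
 *         exit(1);
 *     }
 */
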
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level, int *status)
{
    struct kvm_irq_level event;
    int r;

    if (!kvm_state->irqchip_in_kernel) {
        return 0;
    }
    event.level = level;
    event.irq = irq;
    r = kvm_vm_ioctl(kvm_state, kvm_state->irqchip_inject_ioctl, &event);
    if (r < 0) {
        perror("kvm_set_irq");
    }

    if (status) {
#ifdef KVM_CAP_IRQ_INJECT_STATUS
        *status = (kvm_state->irqchip_inject_ioctl == KVM_IRQ_LINE) ?
            1 : event.status;
#else
        *status = 1;
#endif
    }

    return 1;
}
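
/*
 * Example (illustrative): pulsing a GSI through the in-kernel irqchip.
 * With KVM_IRQ_LINE_STATUS available, status reflects the kernel's report
 * of whether the interrupt was delivered (non-zero) or coalesced (zero);
 * with plain KVM_IRQ_LINE it is always 1.
 *
 *     int delivered;
 *
 *     kvm_set_irq(5, 1, &delivered);   // assert GSI 5
 *     kvm_set_irq(5, 0, NULL);         // deassert it again
 */
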
int kvm_get_irqchip(KVMState *s, struct kvm_irqchip *chip)
{
    int r;

    if (!s->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(s, KVM_GET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_get_irqchip");
    }
    return r;
}
int kvm_set_irqchip(KVMState *s, struct kvm_irqchip *chip)
{
    int r;

    if (!s->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(s, KVM_SET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_set_irqchip");
    }
    return r;
}

#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
int kvm_assign_pci_device(KVMState *s,
                          struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
}

static int kvm_old_assign_irq(KVMState *s,
                              struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, assigned_irq);
}

#ifdef KVM_CAP_ASSIGN_DEV_IRQ
int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, assigned_irq);
    }

    return kvm_old_assign_irq(s, assigned_irq);
}

int kvm_deassign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
}
#else
int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_old_assign_irq(s, assigned_irq);
}
#endif
#endif

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
int kvm_deassign_pci_device(KVMState *s,
                            struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
}
#endif
int kvm_reinject_control(KVMState *s, int pit_reinject)
{
#ifdef KVM_CAP_REINJECT_CONTROL
    int r;
    struct kvm_reinject_control control;

    control.pit_reinject = pit_reinject;

    r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
    if (r > 0) {
        return kvm_vm_ioctl(s, KVM_REINJECT_CONTROL, &control);
    }
#endif
    return -ENOSYS;
}
int kvm_clear_gsi_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm_state->irq_routes->nr = 0;
    return 0;
#else
    return -EINVAL;
#endif
}
int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing *z;
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        z = realloc(s->irq_routes, size);
        if (!z) {
            return -ENOMEM;
        }
        s->nr_allocated_irq_routes = n;
        s->irq_routes = z;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    return 0;
#else
    return -ENOSYS;
#endif
}
int kvm_add_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_add_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}
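
/*
 * Sketch of how a routing entry might be wired up (illustrative values):
 * kvm_add_irq_route() only edits the userspace table; the routes reach the
 * kernel once kvm_commit_irq_routes() pushes the whole table with
 * KVM_SET_GSI_ROUTING.
 *
 *     kvm_add_irq_route(23, KVM_IRQCHIP_IOAPIC, 23);  // GSI 23 -> IOAPIC pin 23
 *     kvm_commit_irq_routes();
 */
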
int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e, *p;
    int i, gsi, found = 0;

    gsi = entry->gsi;

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type == entry->type && e->gsi == gsi) {
            switch (e->type) {
            case KVM_IRQ_ROUTING_IRQCHIP:
                if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                    e->u.irqchip.pin == entry->u.irqchip.pin) {
                    p = &s->irq_routes->entries[--s->irq_routes->nr];
                    *e = *p;
                    found = 1;
                }
                break;
            case KVM_IRQ_ROUTING_MSI:
                if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                    e->u.msi.address_hi == entry->u.msi.address_hi &&
                    e->u.msi.data == entry->u.msi.data) {
                    p = &s->irq_routes->entries[--s->irq_routes->nr];
                    *e = *p;
                    found = 1;
                }
                break;
            default:
                break;
            }
            if (found) {
                /* If there are no other users of this GSI
                 * mark it available in the bitmap */
                for (i = 0; i < s->irq_routes->nr; i++) {
                    e = &s->irq_routes->entries[i];
                    if (e->gsi == gsi) {
                        break;
                    }
                }
                if (i == s->irq_routes->nr) {
                    clear_gsi(s, gsi);
                }
                return 0;
            }
        }
    }

    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
int kvm_update_routing_entry(struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e;
    int i;

    if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
        return -EINVAL;
    }

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != entry->gsi) {
            continue;
        }
        switch (e->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&e->u.irqchip, &newentry->u.irqchip,
                       sizeof e->u.irqchip);
                return 0;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
                return 0;
            }
            break;
        default:
            break;
        }
    }

    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
int kvm_del_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_del_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}
int kvm_commit_irq_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;

    s->irq_routes->flags = 0;
    return kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
#else
    return -ENOSYS;
#endif
}
int kvm_get_irq_route_gsi(void)
{
    KVMState *s = kvm_state;
    int i, bit;
    uint32_t *buf = s->used_gsi_bitmap;

    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < s->max_gsi / 32; i++) {
        bit = ffs(~buf[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }

    return -ENOSPC;
}
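
/*
 * Worked example (illustrative) of the ffs() scan above: if buf[0] is
 * 0x0000000f (GSIs 0-3 in use), then ~buf[0] is 0xfffffff0, ffs() returns 5
 * (bit positions are 1-based), and the function reports GSI 4 as the lowest
 * free one.  A word with all bits set yields ffs(0) == 0 and is skipped.
 */
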
static void kvm_msi_routing_entry(struct kvm_irq_routing_entry *e,
                                  KVMMsiMessage *msg)
{
    e->gsi = msg->gsi;
    e->type = KVM_IRQ_ROUTING_MSI;
    e->flags = 0;
    e->u.msi.address_lo = msg->addr_lo;
    e->u.msi.address_hi = msg->addr_hi;
    e->u.msi.data = msg->data;
}
int kvm_msi_message_add(KVMMsiMessage *msg)
{
    struct kvm_irq_routing_entry e;
    int ret;

    ret = kvm_get_irq_route_gsi();
    if (ret < 0) {
        return ret;
    }
    msg->gsi = ret;

    kvm_msi_routing_entry(&e, msg);
    return kvm_add_routing_entry(&e);
}
int kvm_msi_message_del(KVMMsiMessage *msg)
{
    struct kvm_irq_routing_entry e;

    kvm_msi_routing_entry(&e, msg);
    return kvm_del_routing_entry(&e);
}
int kvm_msi_message_update(KVMMsiMessage *old, KVMMsiMessage *new)
{
    struct kvm_irq_routing_entry e1, e2;
    int ret;

    new->gsi = old->gsi;
    if (memcmp(old, new, sizeof(KVMMsiMessage)) == 0) {
        return 0;
    }

    kvm_msi_routing_entry(&e1, old);
    kvm_msi_routing_entry(&e2, new);

    ret = kvm_update_routing_entry(&e1, &e2);
    if (ret < 0) {
        return ret;
    }

    return 1;
}
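
/*
 * Illustrative MSI lifecycle built on the helpers above (field values are
 * made up): add allocates a GSI and records it in msg->gsi, update returns
 * 1 when the route actually changed so the caller knows a commit is needed,
 * and del releases the routing entry again.
 *
 *     KVMMsiMessage msg = { .addr_lo = 0xfee00000, .data = 0x4041 };
 *
 *     if (kvm_msi_message_add(&msg) == 0 && kvm_commit_irq_routes() == 0) {
 *         // ... later, when the device tears down its MSI vector ...
 *         kvm_msi_message_del(&msg);
 *         kvm_commit_irq_routes();
 *     }
 */
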
#ifdef KVM_CAP_DEVICE_MSIX
int kvm_assign_set_msix_nr(KVMState *s, struct kvm_assigned_msix_nr *msix_nr)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, msix_nr);
}

int kvm_assign_set_msix_entry(KVMState *s,
                              struct kvm_assigned_msix_entry *entry)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, entry);
}
#endif
#ifdef TARGET_I386
void kvm_hpet_disable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_state, &ps2);
    ps2.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_state, &ps2);
}

void kvm_hpet_enable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_state, &ps2);
    ps2.flags &= ~KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_state, &ps2);
}
#endif
#if !defined(TARGET_I386)
int kvm_arch_init_irq_routing(void)
{
    return 0;
}
#endif
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
typedef struct KVMIOPortRegion {
    unsigned long start;
    unsigned long size;
    int status;
    QLIST_ENTRY(KVMIOPortRegion) entry;
} KVMIOPortRegion;

static QLIST_HEAD(, KVMIOPortRegion) ioport_regions;
static void do_set_ioport_access(void *data)
{
    KVMIOPortRegion *region = data;
    bool enable = region->status > 0;
    int r;

    r = kvm_arch_set_ioport_access(region->start, region->size, enable);
    if (r < 0) {
        region->status = r;
    } else {
        region->status = 1;
    }
}
int kvm_add_ioport_region(unsigned long start, unsigned long size,
                          bool is_hot_plug)
{
    KVMIOPortRegion *region = g_malloc0(sizeof(KVMIOPortRegion));
    CPUState *env;
    int r = 0;

    region->start = start;
    region->size = size;
    region->status = 1;
    QLIST_INSERT_HEAD(&ioport_regions, region, entry);

    if (is_hot_plug) {
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            run_on_cpu(env, do_set_ioport_access, region);
            if (region->status < 0) {
                r = region->status;
                kvm_remove_ioport_region(start, size, is_hot_plug);
                break;
            }
        }
    }
    return r;
}
int kvm_remove_ioport_region(unsigned long start, unsigned long size,
                             bool is_hot_unplug)
{
    KVMIOPortRegion *region, *tmp;
    CPUState *env;
    int r = -ENOENT;

    QLIST_FOREACH_SAFE(region, &ioport_regions, entry, tmp) {
        if (region->start == start && region->size == size) {
            region->status = 0;
            if (is_hot_unplug) {
                for (env = first_cpu; env != NULL; env = env->next_cpu) {
                    run_on_cpu(env, do_set_ioport_access, region);
                }
            }
            QLIST_REMOVE(region, entry);
            g_free(region);
            r = 0;
        }
    }
    return r;
}
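
/*
 * Usage sketch (illustrative) for the two helpers above, as used on the
 * device-assignment path: regions added without hot-plug are only pushed to
 * the vCPUs later via kvm_update_ioport_access(), while hot-plugged ones
 * are applied immediately on every vCPU through run_on_cpu().
 *
 *     kvm_add_ioport_region(0x3f8, 8, true);     // hot-plug: applied now
 *     // ...
 *     kvm_remove_ioport_region(0x3f8, 8, true);  // hot-unplug
 */
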
#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
int kvm_update_ioport_access(CPUState *env)
{
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    KVMIOPortRegion *region;
    int r;

    assert(qemu_cpu_is_self(env));

    QLIST_FOREACH(region, &ioport_regions, entry) {
        bool enable = region->status > 0;

        r = kvm_arch_set_ioport_access(region->start, region->size, enable);
        if (r < 0) {
            return r;
        }
    }
#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
    return 0;
}