/*
 * Source: qemu-kvm.git / qemu-kvm.c
 * blob 2047ebbfd82b48aa7ef8a8314da53262cac828ed
 * ("device-assignment: don't touch pci command register")
 */
/*
 * qemu/kvm integration
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config.h"
#include "config-host.h"

#include <assert.h>
#include <string.h>

#include "hw/hw.h"
#include "sysemu.h"
#include "qemu-common.h"
#include "console.h"
#include "block.h"
#include "compatfd.h"
#include "gdbstub.h"
#include "monitor.h"
#include "cpus.h"

#include "qemu-kvm.h"
25 #define EXPECTED_KVM_API_VERSION 12
27 #if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
28 #error libkvm: userspace and kernel version mismatch
29 #endif
31 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
33 static inline void clear_gsi(KVMState *s, unsigned int gsi)
35 uint32_t *bitmap = s->used_gsi_bitmap;
37 if (gsi < s->max_gsi) {
38 bitmap[gsi / 32] &= ~(1U << (gsi % 32));
39 } else {
40 DPRINTF("Invalid GSI %u\n", gsi);
44 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
45 int kvm_assign_pci_device(KVMState *s,
46 struct kvm_assigned_pci_dev *assigned_dev)
48 return kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
51 static int kvm_old_assign_irq(KVMState *s,
52 struct kvm_assigned_irq *assigned_irq)
54 return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, assigned_irq);
57 int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
59 struct kvm_assigned_pci_dev assigned_dev;
61 assigned_dev.assigned_dev_id = dev_id;
62 assigned_dev.flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0;
63 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &assigned_dev);
66 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
67 int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
69 int ret;
71 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
72 if (ret > 0) {
73 return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, assigned_irq);
76 return kvm_old_assign_irq(s, assigned_irq);
79 int kvm_deassign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
81 return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
83 #else
84 int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
86 return kvm_old_assign_irq(s, assigned_irq);
88 #endif
89 #endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
/* Detach a previously assigned host PCI device from the VM. */
int kvm_deassign_pci_device(KVMState *s,
                            struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
}
#endif
99 int kvm_reinject_control(KVMState *s, int pit_reinject)
101 #ifdef KVM_CAP_REINJECT_CONTROL
102 int r;
103 struct kvm_reinject_control control;
105 control.pit_reinject = pit_reinject;
107 r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
108 if (r > 0) {
109 return kvm_vm_ioctl(s, KVM_REINJECT_CONTROL, &control);
111 #endif
112 return -ENOSYS;
/* Drop every entry from the cached GSI routing table.  The change only
 * takes effect in the kernel once the table is committed elsewhere. */
int kvm_clear_gsi_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm_state->irq_routes->nr = 0;
    return 0;
#else
    return -EINVAL;
#endif
}
/*
 * Remove one entry from the cached routing table that matches @entry's
 * type, GSI and type-specific payload (irqchip/pin, or MSI address/data).
 *
 * The removed slot is filled with the table's last entry (order is not
 * preserved).  If no other entry still uses the same GSI, the GSI is
 * released back to the allocation bitmap.
 *
 * Returns 0 on success, -ESRCH if no matching entry exists, -ENOSYS
 * without KVM_CAP_IRQ_ROUTING.
 */
int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e, *last;
    int i, gsi, found = 0;

    gsi = entry->gsi;

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != gsi) {
            continue;
        }
        switch (e->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                /* Swap in the last entry and shrink the table. */
                last = &s->irq_routes->entries[--s->irq_routes->nr];
                *e = *last;
                found = 1;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                last = &s->irq_routes->entries[--s->irq_routes->nr];
                *e = *last;
                found = 1;
            }
            break;
        default:
            break;
        }
        if (found) {
            /* If there are no other users of this GSI
             * mark it available in the bitmap */
            for (i = 0; i < s->irq_routes->nr; i++) {
                e = &s->irq_routes->entries[i];
                if (e->gsi == gsi) {
                    break;
                }
            }
            if (i == s->irq_routes->nr) {
                clear_gsi(s, gsi);
            }

            return 0;
        }
    }

    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
/*
 * Replace the payload of the cached routing entry matching @entry with
 * @newentry's payload.  GSI and type must be identical between the two,
 * otherwise -EINVAL.  Returns 0 on success, -ESRCH when no matching
 * entry is found, -ENOSYS without KVM_CAP_IRQ_ROUTING.
 */
int kvm_update_routing_entry(struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *cur;
    int idx;

    if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
        return -EINVAL;
    }

    for (idx = 0; idx < s->irq_routes->nr; ++idx) {
        cur = &s->irq_routes->entries[idx];
        if (cur->type != entry->type || cur->gsi != entry->gsi) {
            continue;
        }
        switch (cur->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (cur->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                cur->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&cur->u.irqchip, &newentry->u.irqchip,
                       sizeof cur->u.irqchip);
                return 0;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (cur->u.msi.address_lo == entry->u.msi.address_lo &&
                cur->u.msi.address_hi == entry->u.msi.address_hi &&
                cur->u.msi.data == entry->u.msi.data) {
                memcpy(&cur->u.msi, &newentry->u.msi, sizeof cur->u.msi);
                return 0;
            }
            break;
        default:
            break;
        }
    }

    return -ESRCH;
#else
    return -ENOSYS;
#endif
}
/*
 * Convenience wrapper: delete the irqchip routing entry for
 * (@gsi, @irqchip, @pin) from the cached table.
 *
 * Fix: use a designated initializer so the entry's unset fields
 * (padding and the rest of the union) are zeroed rather than left as
 * stack garbage; the original set only the named fields.
 */
int kvm_del_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e = {
        .gsi = gsi,
        .type = KVM_IRQ_ROUTING_IRQCHIP,
        .flags = 0,
        .u.irqchip = {
            .irqchip = irqchip,
            .pin = pin,
        },
    };

    return kvm_del_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}
245 int kvm_get_irq_route_gsi(void)
247 KVMState *s = kvm_state;
248 int i, bit;
249 uint32_t *buf = s->used_gsi_bitmap;
251 /* Return the lowest unused GSI in the bitmap */
252 for (i = 0; i < s->max_gsi / 32; i++) {
253 bit = ffs(~buf[i]);
254 if (!bit) {
255 continue;
258 return bit - 1 + i * 32;
261 return -ENOSPC;
264 static void kvm_msi_routing_entry(struct kvm_irq_routing_entry *e,
265 KVMMsiMessage *msg)
268 e->gsi = msg->gsi;
269 e->type = KVM_IRQ_ROUTING_MSI;
270 e->flags = 0;
271 e->u.msi.address_lo = msg->addr_lo;
272 e->u.msi.address_hi = msg->addr_hi;
273 e->u.msi.data = msg->data;
276 int kvm_msi_message_add(KVMMsiMessage *msg)
278 struct kvm_irq_routing_entry e;
279 int ret;
281 ret = kvm_get_irq_route_gsi();
282 if (ret < 0) {
283 return ret;
285 msg->gsi = ret;
287 kvm_msi_routing_entry(&e, msg);
288 kvm_add_routing_entry(kvm_state, &e);
289 return 0;
292 int kvm_msi_message_del(KVMMsiMessage *msg)
294 struct kvm_irq_routing_entry e;
296 kvm_msi_routing_entry(&e, msg);
297 return kvm_del_routing_entry(&e);
300 int kvm_msi_message_update(KVMMsiMessage *old, KVMMsiMessage *new)
302 struct kvm_irq_routing_entry e1, e2;
303 int ret;
305 new->gsi = old->gsi;
306 if (memcmp(old, new, sizeof(KVMMsiMessage)) == 0) {
307 return 0;
310 kvm_msi_routing_entry(&e1, old);
311 kvm_msi_routing_entry(&e2, new);
313 ret = kvm_update_routing_entry(&e1, &e2);
314 if (ret < 0) {
315 return ret;
318 return 1;
#ifdef KVM_CAP_DEVICE_MSIX
/* Tell the kernel how many MSI-X vectors an assigned device uses. */
int kvm_assign_set_msix_nr(KVMState *s, struct kvm_assigned_msix_nr *msix_nr)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, msix_nr);
}

/* Program one MSI-X table entry for an assigned device. */
int kvm_assign_set_msix_entry(KVMState *s,
                              struct kvm_assigned_msix_entry *entry)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, entry);
}
#endif
#ifdef TARGET_I386
/*
 * Toggle the in-kernel PIT's HPET-legacy flag.
 *
 * Fix: the original ignored kvm_get_pit2's return value; on failure it
 * would modify uninitialized stack data and write it back with
 * kvm_set_pit2.  Bail out if the read fails instead.  (Assumes the
 * wrapper returns < 0 on error like the other kvm_* ioctl wrappers in
 * this file — TODO confirm.)
 */
void kvm_hpet_disable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    if (kvm_get_pit2(kvm_state, &ps2) < 0) {
        return;
    }
    ps2.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_state, &ps2);
}

/* Counterpart of kvm_hpet_disable_kpit: clear the HPET-legacy flag. */
void kvm_hpet_enable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    if (kvm_get_pit2(kvm_state, &ps2) < 0) {
        return;
    }
    ps2.flags &= ~KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_state, &ps2);
}
#endif
#if !defined(TARGET_I386)
/* Stub: non-x86 targets need no arch-specific IRQ routing setup here. */
int kvm_arch_init_irq_routing(void)
{
    return 0;
}
#endif
362 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
363 typedef struct KVMIOPortRegion {
364 unsigned long start;
365 unsigned long size;
366 int status;
367 QLIST_ENTRY(KVMIOPortRegion) entry;
368 } KVMIOPortRegion;
370 static QLIST_HEAD(, KVMIOPortRegion) ioport_regions;
372 static void do_set_ioport_access(void *data)
374 KVMIOPortRegion *region = data;
375 bool enable = region->status > 0;
376 int r;
378 r = kvm_arch_set_ioport_access(region->start, region->size, enable);
379 if (r < 0) {
380 region->status = r;
381 } else {
382 region->status = 1;
386 int kvm_add_ioport_region(unsigned long start, unsigned long size,
387 bool is_hot_plug)
389 KVMIOPortRegion *region = g_malloc0(sizeof(KVMIOPortRegion));
390 CPUState *env;
391 int r = 0;
393 region->start = start;
394 region->size = size;
395 region->status = 1;
396 QLIST_INSERT_HEAD(&ioport_regions, region, entry);
398 if (is_hot_plug) {
399 for (env = first_cpu; env != NULL; env = env->next_cpu) {
400 run_on_cpu(env, do_set_ioport_access, region);
401 if (region->status < 0) {
402 r = region->status;
403 kvm_remove_ioport_region(start, size, is_hot_plug);
404 break;
408 return r;
411 int kvm_remove_ioport_region(unsigned long start, unsigned long size,
412 bool is_hot_unplug)
414 KVMIOPortRegion *region, *tmp;
415 CPUState *env;
416 int r = -ENOENT;
418 QLIST_FOREACH_SAFE(region, &ioport_regions, entry, tmp) {
419 if (region->start == start && region->size == size) {
420 region->status = 0;
422 if (is_hot_unplug) {
423 for (env = first_cpu; env != NULL; env = env->next_cpu) {
424 run_on_cpu(env, do_set_ioport_access, region);
427 QLIST_REMOVE(region, entry);
428 g_free(region);
429 r = 0;
431 return r;
433 #endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
435 int kvm_update_ioport_access(CPUState *env)
437 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
438 KVMIOPortRegion *region;
439 int r;
441 assert(qemu_cpu_is_self(env));
443 QLIST_FOREACH(region, &ioport_regions, entry) {
444 bool enable = region->status > 0;
446 r = kvm_arch_set_ioport_access(region->start, region->size, enable);
447 if (r < 0) {
448 return r;
451 #endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
452 return 0;