vfio-pci: Add PCIe capability mangling based on bus type
[qemu/kevin.git] / hw / vfio_pci.c
1 /*
2 * vfio based device assignment support
4 * Copyright Red Hat, Inc. 2012
6 * Authors:
7 * Alex Williamson <alex.williamson@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Based on qemu-kvm device-assignment:
13 * Adapted for KVM by Qumranet.
14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
21 #include <dirent.h>
22 #include <unistd.h>
23 #include <sys/ioctl.h>
24 #include <sys/mman.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <linux/vfio.h>
29 #include "config.h"
30 #include "qemu/event_notifier.h"
31 #include "exec/address-spaces.h"
32 #include "sysemu/kvm.h"
33 #include "exec/memory.h"
34 #include "hw/pci/msi.h"
35 #include "hw/pci/msix.h"
36 #include "hw/pci/pci.h"
37 #include "qemu-common.h"
38 #include "qemu/error-report.h"
39 #include "qemu/queue.h"
40 #include "qemu/range.h"
42 /* #define DEBUG_VFIO */
43 #ifdef DEBUG_VFIO
44 #define DPRINTF(fmt, ...) \
45 do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
46 #else
47 #define DPRINTF(fmt, ...) \
48 do { } while (0)
49 #endif
51 typedef struct VFIOBAR {
52 off_t fd_offset; /* offset of BAR within device fd */
53 int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
54 MemoryRegion mem; /* slow, read/write access */
55 MemoryRegion mmap_mem; /* direct mapped access */
56 void *mmap;
57 size_t size;
58 uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
59 uint8_t nr; /* cache the BAR number for debug */
60 } VFIOBAR;
62 typedef struct VFIOINTx {
63 bool pending; /* interrupt pending */
64 bool kvm_accel; /* set when QEMU bypass through KVM enabled */
65 uint8_t pin; /* which pin to pull for qemu_set_irq */
66 EventNotifier interrupt; /* eventfd triggered on interrupt */
67 EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
68 PCIINTxRoute route; /* routing info for QEMU bypass */
69 uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
70 QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
71 } VFIOINTx;
73 struct VFIODevice;
75 typedef struct VFIOMSIVector {
76 EventNotifier interrupt; /* eventfd triggered on interrupt */
77 struct VFIODevice *vdev; /* back pointer to device */
78 int virq; /* KVM irqchip route for QEMU bypass */
79 bool use;
80 } VFIOMSIVector;
82 enum {
83 VFIO_INT_NONE = 0,
84 VFIO_INT_INTx = 1,
85 VFIO_INT_MSI = 2,
86 VFIO_INT_MSIX = 3,
89 struct VFIOGroup;
91 typedef struct VFIOContainer {
92 int fd; /* /dev/vfio/vfio, empowered by the attached groups */
93 struct {
94 /* enable abstraction to support various iommu backends */
95 union {
96 MemoryListener listener; /* Used by type1 iommu */
98 void (*release)(struct VFIOContainer *);
99 } iommu_data;
100 QLIST_HEAD(, VFIOGroup) group_list;
101 QLIST_ENTRY(VFIOContainer) next;
102 } VFIOContainer;
104 /* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
105 typedef struct VFIOMSIXInfo {
106 uint8_t table_bar;
107 uint8_t pba_bar;
108 uint16_t entries;
109 uint32_t table_offset;
110 uint32_t pba_offset;
111 MemoryRegion mmap_mem;
112 void *mmap;
113 } VFIOMSIXInfo;
115 typedef struct VFIODevice {
116 PCIDevice pdev;
117 int fd;
118 VFIOINTx intx;
119 unsigned int config_size;
120 uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
121 off_t config_offset; /* Offset of config space region within device fd */
122 unsigned int rom_size;
123 off_t rom_offset; /* Offset of ROM region within device fd */
124 int msi_cap_size;
125 VFIOMSIVector *msi_vectors;
126 VFIOMSIXInfo *msix;
127 int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
128 int interrupt; /* Current interrupt type */
129 VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
130 PCIHostDeviceAddress host;
131 QLIST_ENTRY(VFIODevice) next;
132 struct VFIOGroup *group;
133 bool reset_works;
134 } VFIODevice;
136 typedef struct VFIOGroup {
137 int fd;
138 int groupid;
139 VFIOContainer *container;
140 QLIST_HEAD(, VFIODevice) device_list;
141 QLIST_ENTRY(VFIOGroup) next;
142 QLIST_ENTRY(VFIOGroup) container_next;
143 } VFIOGroup;
145 #define MSIX_CAP_LENGTH 12
147 static QLIST_HEAD(, VFIOContainer)
148 container_list = QLIST_HEAD_INITIALIZER(container_list);
150 static QLIST_HEAD(, VFIOGroup)
151 group_list = QLIST_HEAD_INITIALIZER(group_list);
153 static void vfio_disable_interrupts(VFIODevice *vdev);
154 static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
155 static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
158 * Common VFIO interrupt disable
160 static void vfio_disable_irqindex(VFIODevice *vdev, int index)
162 struct vfio_irq_set irq_set = {
163 .argsz = sizeof(irq_set),
164 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
165 .index = index,
166 .start = 0,
167 .count = 0,
170 ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
174 * INTx
176 static void vfio_unmask_intx(VFIODevice *vdev)
178 struct vfio_irq_set irq_set = {
179 .argsz = sizeof(irq_set),
180 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
181 .index = VFIO_PCI_INTX_IRQ_INDEX,
182 .start = 0,
183 .count = 1,
186 ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
189 #ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
190 static void vfio_mask_intx(VFIODevice *vdev)
192 struct vfio_irq_set irq_set = {
193 .argsz = sizeof(irq_set),
194 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
195 .index = VFIO_PCI_INTX_IRQ_INDEX,
196 .start = 0,
197 .count = 1,
200 ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
202 #endif
205 * Disabling BAR mmapping can be slow, but toggling it around INTx can
206 * also be a huge overhead. We try to get the best of both worlds by
207 * waiting until an interrupt to disable mmaps (subsequent transitions
208 * to the same state are effectively no overhead). If the interrupt has
209 * been serviced and the time gap is long enough, we re-enable mmaps for
210 * performance. This works well for things like graphics cards, which
211 * may not use their interrupt at all and are penalized to an unusable
212 * level by read/write BAR traps. Other devices, like NICs, have more
213 * regular interrupts and see much better latency by staying in non-mmap
214 * mode. We therefore set the default mmap_timeout such that a ping
215 * is just enough to keep the mmap disabled. Users can experiment with
216 * other options with the x-intx-mmap-timeout-ms parameter (a value of
217 * zero disables the timer).
219 static void vfio_intx_mmap_enable(void *opaque)
221 VFIODevice *vdev = opaque;
223 if (vdev->intx.pending) {
224 qemu_mod_timer(vdev->intx.mmap_timer,
225 qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
226 return;
229 vfio_mmap_set_enabled(vdev, true);
232 static void vfio_intx_interrupt(void *opaque)
234 VFIODevice *vdev = opaque;
236 if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
237 return;
240 DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
241 vdev->host.bus, vdev->host.slot, vdev->host.function,
242 'A' + vdev->intx.pin);
244 vdev->intx.pending = true;
245 qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
246 vfio_mmap_set_enabled(vdev, false);
247 if (vdev->intx.mmap_timeout) {
248 qemu_mod_timer(vdev->intx.mmap_timer,
249 qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
253 static void vfio_eoi(VFIODevice *vdev)
255 if (!vdev->intx.pending) {
256 return;
259 DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
260 vdev->host.bus, vdev->host.slot, vdev->host.function);
262 vdev->intx.pending = false;
263 qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
264 vfio_unmask_intx(vdev);
267 static void vfio_enable_intx_kvm(VFIODevice *vdev)
269 #ifdef CONFIG_KVM
270 struct kvm_irqfd irqfd = {
271 .fd = event_notifier_get_fd(&vdev->intx.interrupt),
272 .gsi = vdev->intx.route.irq,
273 .flags = KVM_IRQFD_FLAG_RESAMPLE,
275 struct vfio_irq_set *irq_set;
276 int ret, argsz;
277 int32_t *pfd;
279 if (!kvm_irqfds_enabled() ||
280 vdev->intx.route.mode != PCI_INTX_ENABLED ||
281 !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
282 return;
285 /* Get to a known interrupt state */
286 qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
287 vfio_mask_intx(vdev);
288 vdev->intx.pending = false;
289 qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
291 /* Get an eventfd for resample/unmask */
292 if (event_notifier_init(&vdev->intx.unmask, 0)) {
293 error_report("vfio: Error: event_notifier_init failed eoi");
294 goto fail;
297 /* KVM triggers it, VFIO listens for it */
298 irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
300 if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
301 error_report("vfio: Error: Failed to setup resample irqfd: %m");
302 goto fail_irqfd;
305 argsz = sizeof(*irq_set) + sizeof(*pfd);
307 irq_set = g_malloc0(argsz);
308 irq_set->argsz = argsz;
309 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
310 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
311 irq_set->start = 0;
312 irq_set->count = 1;
313 pfd = (int32_t *)&irq_set->data;
315 *pfd = irqfd.resamplefd;
317 ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
318 g_free(irq_set);
319 if (ret) {
320 error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
321 goto fail_vfio;
324 /* Let'em rip */
325 vfio_unmask_intx(vdev);
327 vdev->intx.kvm_accel = true;
329 DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
330 __func__, vdev->host.domain, vdev->host.bus,
331 vdev->host.slot, vdev->host.function);
333 return;
335 fail_vfio:
336 irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
337 kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
338 fail_irqfd:
339 event_notifier_cleanup(&vdev->intx.unmask);
340 fail:
341 qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
342 vfio_unmask_intx(vdev);
343 #endif
346 static void vfio_disable_intx_kvm(VFIODevice *vdev)
348 #ifdef CONFIG_KVM
349 struct kvm_irqfd irqfd = {
350 .fd = event_notifier_get_fd(&vdev->intx.interrupt),
351 .gsi = vdev->intx.route.irq,
352 .flags = KVM_IRQFD_FLAG_DEASSIGN,
355 if (!vdev->intx.kvm_accel) {
356 return;
360 * Get to a known state, hardware masked, QEMU ready to accept new
361 * interrupts, QEMU IRQ de-asserted.
363 vfio_mask_intx(vdev);
364 vdev->intx.pending = false;
365 qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
367 /* Tell KVM to stop listening for an INTx irqfd */
368 if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
369 error_report("vfio: Error: Failed to disable INTx irqfd: %m");
372 /* We only need to close the eventfd for VFIO to cleanup the kernel side */
373 event_notifier_cleanup(&vdev->intx.unmask);
375 /* QEMU starts listening for interrupt events. */
376 qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
378 vdev->intx.kvm_accel = false;
380 /* If we've missed an event, let it re-fire through QEMU */
381 vfio_unmask_intx(vdev);
383 DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
384 __func__, vdev->host.domain, vdev->host.bus,
385 vdev->host.slot, vdev->host.function);
386 #endif
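/*
 * Called back when guest INTx routing changes, e.g. the chipset remaps
 * the pin to a different IRQ: drop any KVM bypass for the stale route
 * and re-establish it on the new one before replaying a pending EOI.
 */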
389 static void vfio_update_irq(PCIDevice *pdev)
391 VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
392 PCIINTxRoute route;
394 if (vdev->interrupt != VFIO_INT_INTx) {
395 return;
398 route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
400 if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
401 return; /* Nothing changed */
404 DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
405 vdev->host.domain, vdev->host.bus, vdev->host.slot,
406 vdev->host.function, vdev->intx.route.irq, route.irq);
408 vfio_disable_intx_kvm(vdev);
410 vdev->intx.route = route;
412 if (route.mode != PCI_INTX_ENABLED) {
413 return;
416 vfio_enable_intx_kvm(vdev);
418 /* Re-enable the interrupt in case we missed an EOI */
419 vfio_eoi(vdev);
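/*
 * Enable INTx by connecting an eventfd to the device interrupt with
 * VFIO_DEVICE_SET_IRQS.  QEMU services the eventfd itself unless the
 * KVM bypass above takes over.
 */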
422 static int vfio_enable_intx(VFIODevice *vdev)
424 uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
425 int ret, argsz;
426 struct vfio_irq_set *irq_set;
427 int32_t *pfd;
429 if (!pin) {
430 return 0;
433 vfio_disable_interrupts(vdev);
435 vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
437 #ifdef CONFIG_KVM
439 * This is conditional only to avoid generating error messages on platforms
440 * where we won't actually use the result anyway.
442 if (kvm_irqfds_enabled() &&
443 kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
444 vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
445 vdev->intx.pin);
447 #endif
449 ret = event_notifier_init(&vdev->intx.interrupt, 0);
450 if (ret) {
451 error_report("vfio: Error: event_notifier_init failed");
452 return ret;
455 argsz = sizeof(*irq_set) + sizeof(*pfd);
457 irq_set = g_malloc0(argsz);
458 irq_set->argsz = argsz;
459 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
460 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
461 irq_set->start = 0;
462 irq_set->count = 1;
463 pfd = (int32_t *)&irq_set->data;
465 *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
466 qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);
468 ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
469 g_free(irq_set);
470 if (ret) {
471 error_report("vfio: Error: Failed to setup INTx fd: %m");
472 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
473 event_notifier_cleanup(&vdev->intx.interrupt);
474 return -errno;
477 vfio_enable_intx_kvm(vdev);
479 vdev->interrupt = VFIO_INT_INTx;
481 DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
482 vdev->host.bus, vdev->host.slot, vdev->host.function);
484 return 0;
487 static void vfio_disable_intx(VFIODevice *vdev)
489 int fd;
491 qemu_del_timer(vdev->intx.mmap_timer);
492 vfio_disable_intx_kvm(vdev);
493 vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
494 vdev->intx.pending = false;
495 qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
496 vfio_mmap_set_enabled(vdev, true);
498 fd = event_notifier_get_fd(&vdev->intx.interrupt);
499 qemu_set_fd_handler(fd, NULL, NULL, vdev);
500 event_notifier_cleanup(&vdev->intx.interrupt);
502 vdev->interrupt = VFIO_INT_NONE;
504 DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
505 vdev->host.bus, vdev->host.slot, vdev->host.function);
509 * MSI/X
511 static void vfio_msi_interrupt(void *opaque)
513 VFIOMSIVector *vector = opaque;
514 VFIODevice *vdev = vector->vdev;
515 int nr = vector - vdev->msi_vectors;
517 if (!event_notifier_test_and_clear(&vector->interrupt)) {
518 return;
521 DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
522 vdev->host.domain, vdev->host.bus, vdev->host.slot,
523 vdev->host.function, nr);
525 if (vdev->interrupt == VFIO_INT_MSIX) {
526 msix_notify(&vdev->pdev, nr);
527 } else if (vdev->interrupt == VFIO_INT_MSI) {
528 msi_notify(&vdev->pdev, nr);
529 } else {
530 error_report("vfio: MSI interrupt received, but not enabled?");
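/*
 * Program every MSI/MSI-X vector with a single VFIO_DEVICE_SET_IRQS
 * call.  The payload is variable sized, a sketch of the layout:
 *
 *   [ struct vfio_irq_set | fd[0] | fd[1] | ... | fd[nr_vectors - 1] ]
 *
 * Unused vectors pass -1 so the kernel leaves them unconfigured.
 */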
534 static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
536 struct vfio_irq_set *irq_set;
537 int ret = 0, i, argsz;
538 int32_t *fds;
540 argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
542 irq_set = g_malloc0(argsz);
543 irq_set->argsz = argsz;
544 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
545 irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
546 irq_set->start = 0;
547 irq_set->count = vdev->nr_vectors;
548 fds = (int32_t *)&irq_set->data;
550 for (i = 0; i < vdev->nr_vectors; i++) {
551 if (!vdev->msi_vectors[i].use) {
552 fds[i] = -1;
553 continue;
556 fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
559 ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
561 g_free(irq_set);
563 return ret;
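/*
 * MSI-X per-vector use callback: prefer a KVM irqchip route with an
 * irqfd, falling back to a userspace fd handler, and grow the number
 * of host-enabled vectors only as the guest actually uses them.
 */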
566 static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
567 MSIMessage *msg, IOHandler *handler)
569 VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
570 VFIOMSIVector *vector;
571 int ret;
573 DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
574 vdev->host.domain, vdev->host.bus, vdev->host.slot,
575 vdev->host.function, nr);
577 vector = &vdev->msi_vectors[nr];
578 vector->vdev = vdev;
579 vector->use = true;
581 msix_vector_use(pdev, nr);
583 if (event_notifier_init(&vector->interrupt, 0)) {
584 error_report("vfio: Error: event_notifier_init failed");
588 * Attempt to enable route through KVM irqchip,
589 * default to userspace handling if unavailable.
591 vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
592 if (vector->virq < 0 ||
593 kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
594 vector->virq) < 0) {
595 if (vector->virq >= 0) {
596 kvm_irqchip_release_virq(kvm_state, vector->virq);
597 vector->virq = -1;
599 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
600 handler, NULL, vector);
604 * We don't want to have the host allocate all possible MSI vectors
605 * for a device if they're not in use, so we shut them down and incrementally
606 * increase them as needed.
608 if (vdev->nr_vectors < nr + 1) {
609 vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
610 vdev->nr_vectors = nr + 1;
611 ret = vfio_enable_vectors(vdev, true);
612 if (ret) {
613 error_report("vfio: failed to enable vectors, %d", ret);
615 } else {
616 int argsz;
617 struct vfio_irq_set *irq_set;
618 int32_t *pfd;
620 argsz = sizeof(*irq_set) + sizeof(*pfd);
622 irq_set = g_malloc0(argsz);
623 irq_set->argsz = argsz;
624 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
625 VFIO_IRQ_SET_ACTION_TRIGGER;
626 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
627 irq_set->start = nr;
628 irq_set->count = 1;
629 pfd = (int32_t *)&irq_set->data;
631 *pfd = event_notifier_get_fd(&vector->interrupt);
633 ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
634 g_free(irq_set);
635 if (ret) {
636 error_report("vfio: failed to modify vector, %d", ret);
640 return 0;
643 static int vfio_msix_vector_use(PCIDevice *pdev,
644 unsigned int nr, MSIMessage msg)
646 return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
649 static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
651 VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
652 VFIOMSIVector *vector = &vdev->msi_vectors[nr];
653 int argsz;
654 struct vfio_irq_set *irq_set;
655 int32_t *pfd;
657 DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
658 vdev->host.domain, vdev->host.bus, vdev->host.slot,
659 vdev->host.function, nr);
662 * XXX What's the right thing to do here? This turns off the interrupt
663 * completely, but do we really just want to switch the interrupt to
664 * bouncing through userspace and let msix.c drop it? Not sure.
666 msix_vector_unuse(pdev, nr);
668 argsz = sizeof(*irq_set) + sizeof(*pfd);
670 irq_set = g_malloc0(argsz);
671 irq_set->argsz = argsz;
672 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
673 VFIO_IRQ_SET_ACTION_TRIGGER;
674 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
675 irq_set->start = nr;
676 irq_set->count = 1;
677 pfd = (int32_t *)&irq_set->data;
679 *pfd = -1;
681 ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
683 g_free(irq_set);
685 if (vector->virq < 0) {
686 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
687 NULL, NULL, NULL);
688 } else {
689 kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
690 vector->virq);
691 kvm_irqchip_release_virq(kvm_state, vector->virq);
692 vector->virq = -1;
695 event_notifier_cleanup(&vector->interrupt);
696 vector->use = false;
699 static void vfio_enable_msix(VFIODevice *vdev)
701 vfio_disable_interrupts(vdev);
703 vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));
705 vdev->interrupt = VFIO_INT_MSIX;
708 * Some communication channels between VF & PF or PF & fw rely on the
709 * physical state of the device and expect that enabling MSI-X from the
710 * guest enables the same on the host. When our guest is Linux, the
711 * guest driver call to pci_enable_msix() sets the enabling bit in the
712 * MSI-X capability, but leaves the vector table masked. We therefore
713 * can't rely on a vector_use callback (from request_irq() in the guest)
714 * to switch the physical device into MSI-X mode because that may come a
715 * long time after pci_enable_msix(). This code enables vector 0 with
716 * triggering to userspace, then immediately releases the vector, leaving
717 * the physical device with no vectors enabled, but MSI-X enabled, just
718 * like the guest view.
720 vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
721 vfio_msix_vector_release(&vdev->pdev, 0);
723 if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
724 vfio_msix_vector_release, NULL)) {
725 error_report("vfio: msix_set_vector_notifiers failed");
728 DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
729 vdev->host.bus, vdev->host.slot, vdev->host.function);
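/*
 * Enable MSI with the vector count the guest programmed.  A positive
 * return from vfio_enable_vectors() is the number of vectors the host
 * could enable, so we tear everything down and retry with that count.
 */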
732 static void vfio_enable_msi(VFIODevice *vdev)
734 int ret, i;
736 vfio_disable_interrupts(vdev);
738 vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
739 retry:
740 vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));
742 for (i = 0; i < vdev->nr_vectors; i++) {
743 MSIMessage msg;
744 VFIOMSIVector *vector = &vdev->msi_vectors[i];
746 vector->vdev = vdev;
747 vector->use = true;
749 if (event_notifier_init(&vector->interrupt, 0)) {
750 error_report("vfio: Error: event_notifier_init failed");
753 msg = msi_get_message(&vdev->pdev, i);
756 * Attempt to enable route through KVM irqchip,
757 * default to userspace handling if unavailable.
759 vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
760 if (vector->virq < 0 ||
761 kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
762 vector->virq) < 0) {
763 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
764 vfio_msi_interrupt, NULL, vector);
768 ret = vfio_enable_vectors(vdev, false);
769 if (ret) {
770 if (ret < 0) {
771 error_report("vfio: Error: Failed to setup MSI fds: %m");
772 } else if (ret != vdev->nr_vectors) {
773 error_report("vfio: Error: Failed to enable %d "
774 "MSI vectors, retry with %d", vdev->nr_vectors, ret);
777 for (i = 0; i < vdev->nr_vectors; i++) {
778 VFIOMSIVector *vector = &vdev->msi_vectors[i];
779 if (vector->virq >= 0) {
780 kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
781 vector->virq);
782 kvm_irqchip_release_virq(kvm_state, vector->virq);
783 vector->virq = -1;
784 } else {
785 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
786 NULL, NULL, NULL);
788 event_notifier_cleanup(&vector->interrupt);
791 g_free(vdev->msi_vectors);
793 if (ret > 0 && ret != vdev->nr_vectors) {
794 vdev->nr_vectors = ret;
795 goto retry;
797 vdev->nr_vectors = 0;
799 return;
802 vdev->interrupt = VFIO_INT_MSI;
804 DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
805 vdev->host.domain, vdev->host.bus, vdev->host.slot,
806 vdev->host.function, vdev->nr_vectors);
809 static void vfio_disable_msi_common(VFIODevice *vdev)
811 g_free(vdev->msi_vectors);
812 vdev->msi_vectors = NULL;
813 vdev->nr_vectors = 0;
814 vdev->interrupt = VFIO_INT_NONE;
816 vfio_enable_intx(vdev);
819 static void vfio_disable_msix(VFIODevice *vdev)
821 msix_unset_vector_notifiers(&vdev->pdev);
823 if (vdev->nr_vectors) {
824 vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
827 vfio_disable_msi_common(vdev);
829 DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
830 vdev->host.bus, vdev->host.slot, vdev->host.function);
833 static void vfio_disable_msi(VFIODevice *vdev)
835 int i;
837 vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);
839 for (i = 0; i < vdev->nr_vectors; i++) {
840 VFIOMSIVector *vector = &vdev->msi_vectors[i];
842 if (!vector->use) {
843 continue;
846 if (vector->virq >= 0) {
847 kvm_irqchip_remove_irqfd_notifier(kvm_state,
848 &vector->interrupt, vector->virq);
849 kvm_irqchip_release_virq(kvm_state, vector->virq);
850 vector->virq = -1;
851 } else {
852 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
853 NULL, NULL, NULL);
856 event_notifier_cleanup(&vector->interrupt);
859 vfio_disable_msi_common(vdev);
861 DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
862 vdev->host.bus, vdev->host.slot, vdev->host.function);
866 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
868 static void vfio_bar_write(void *opaque, hwaddr addr,
869 uint64_t data, unsigned size)
871 VFIOBAR *bar = opaque;
872 union {
873 uint8_t byte;
874 uint16_t word;
875 uint32_t dword;
876 uint64_t qword;
877 } buf;
879 switch (size) {
880 case 1:
881 buf.byte = data;
882 break;
883 case 2:
884 buf.word = cpu_to_le16(data);
885 break;
886 case 4:
887 buf.dword = cpu_to_le32(data);
888 break;
889 default:
890 hw_error("vfio: unsupported write size, %d bytes\n", size);
891 break;
894 if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
895 error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
896 __func__, addr, data, size);
899 DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
900 __func__, bar->nr, addr, data, size);
903 * A read or write to a BAR always signals an INTx EOI. This will
904 * do nothing if not pending (including not in INTx mode). We assume
905 * that a BAR access is in response to an interrupt and that BAR
906 * accesses will service the interrupt. Unfortunately, we don't know
907 * which access will service the interrupt, so we're potentially
908 * getting quite a few host interrupts per guest interrupt.
910 vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
913 static uint64_t vfio_bar_read(void *opaque,
914 hwaddr addr, unsigned size)
916 VFIOBAR *bar = opaque;
917 union {
918 uint8_t byte;
919 uint16_t word;
920 uint32_t dword;
921 uint64_t qword;
922 } buf;
923 uint64_t data = 0;
925 if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
926 error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
927 __func__, addr, size);
928 return (uint64_t)-1;
931 switch (size) {
932 case 1:
933 data = buf.byte;
934 break;
935 case 2:
936 data = le16_to_cpu(buf.word);
937 break;
938 case 4:
939 data = le32_to_cpu(buf.dword);
940 break;
941 default:
942 hw_error("vfio: unsupported read size, %d bytes\n", size);
943 break;
946 DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
947 __func__, bar->nr, addr, size, data);
949 /* Same as write above */
950 vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
952 return data;
955 static const MemoryRegionOps vfio_bar_ops = {
956 .read = vfio_bar_read,
957 .write = vfio_bar_write,
958 .endianness = DEVICE_LITTLE_ENDIAN,
962 * PCI config space
964 static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
966 VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
967 uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
969 memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
970 emu_bits = le32_to_cpu(emu_bits);
972 if (emu_bits) {
973 emu_val = pci_default_read_config(pdev, addr, len);
976 if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
977 ssize_t ret;
979 ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
980 if (ret != len) {
981 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
982 __func__, vdev->host.domain, vdev->host.bus,
983 vdev->host.slot, vdev->host.function, addr, len);
984 return -errno;
986 phys_val = le32_to_cpu(phys_val);
989 val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
991 DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
992 vdev->host.domain, vdev->host.bus, vdev->host.slot,
993 vdev->host.function, addr, len, val);
995 return val;
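/*
 * Config space writes go to the device first; the kernel filters out
 * anything we may not change.  We then mirror the write into QEMU's
 * emulated copy and watch for MSI/MSI-X enable transitions to switch
 * the interrupt backend.  (Reads above merge the two views, e.g. a
 * byte with emu_bits 0xf0 returns the emulated high nibble over the
 * physical low nibble.)
 */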
998 static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
999 uint32_t val, int len)
1001 VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
1002 uint32_t val_le = cpu_to_le32(val);
1004 DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
1005 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1006 vdev->host.function, addr, val, len);
1008 /* Write everything to VFIO, let it filter out what we can't write */
1009 if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
1010 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
1011 __func__, vdev->host.domain, vdev->host.bus,
1012 vdev->host.slot, vdev->host.function, addr, val, len);
1015 /* MSI/MSI-X Enabling/Disabling */
1016 if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
1017 ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
1018 int is_enabled, was_enabled = msi_enabled(pdev);
1020 pci_default_write_config(pdev, addr, val, len);
1022 is_enabled = msi_enabled(pdev);
1024 if (!was_enabled && is_enabled) {
1025 vfio_enable_msi(vdev);
1026 } else if (was_enabled && !is_enabled) {
1027 vfio_disable_msi(vdev);
1029 } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
1030 ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
1031 int is_enabled, was_enabled = msix_enabled(pdev);
1033 pci_default_write_config(pdev, addr, val, len);
1035 is_enabled = msix_enabled(pdev);
1037 if (!was_enabled && is_enabled) {
1038 vfio_enable_msix(vdev);
1039 } else if (was_enabled && !is_enabled) {
1040 vfio_disable_msix(vdev);
1042 } else {
1043 /* Write everything to QEMU to keep emulated bits correct */
1044 pci_default_write_config(pdev, addr, val, len);
1049 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
1051 static int vfio_dma_unmap(VFIOContainer *container,
1052 hwaddr iova, ram_addr_t size)
1054 struct vfio_iommu_type1_dma_unmap unmap = {
1055 .argsz = sizeof(unmap),
1056 .flags = 0,
1057 .iova = iova,
1058 .size = size,
1061 if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
1062 DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
1063 return -errno;
1066 return 0;
1069 static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
1070 ram_addr_t size, void *vaddr, bool readonly)
1072 struct vfio_iommu_type1_dma_map map = {
1073 .argsz = sizeof(map),
1074 .flags = VFIO_DMA_MAP_FLAG_READ,
1075 .vaddr = (__u64)(uintptr_t)vaddr,
1076 .iova = iova,
1077 .size = size,
1080 if (!readonly) {
1081 map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
1085 * Try the mapping, if it fails with EBUSY, unmap the region and try
1086 * again. This shouldn't be necessary, but we sometimes see it in
1087 * the VGA ROM space.
1089 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
1090 (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
1091 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
1092 return 0;
1095 DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
1096 return -errno;
1099 static bool vfio_listener_skipped_section(MemoryRegionSection *section)
1101 return !memory_region_is_ram(section->mr);
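/*
 * The type1 backend maps all guest RAM for DMA, IOVA equal to guest
 * physical address, trimming each section to target page alignment.
 */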
1104 static void vfio_listener_region_add(MemoryListener *listener,
1105 MemoryRegionSection *section)
1107 VFIOContainer *container = container_of(listener, VFIOContainer,
1108 iommu_data.listener);
1109 hwaddr iova, end;
1110 void *vaddr;
1111 int ret;
1113 if (vfio_listener_skipped_section(section)) {
1114 DPRINTF("vfio: SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
1115 section->offset_within_address_space,
1116 section->offset_within_address_space + section->size - 1);
1117 return;
1120 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
1121 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
1122 error_report("%s received unaligned region", __func__);
1123 return;
1126 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
1127 end = (section->offset_within_address_space + section->size) &
1128 TARGET_PAGE_MASK;
1130 if (iova >= end) {
1131 return;
1134 vaddr = memory_region_get_ram_ptr(section->mr) +
1135 section->offset_within_region +
1136 (iova - section->offset_within_address_space);
1138 DPRINTF("vfio: region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
1139 iova, end - 1, vaddr);
1141 ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
1142 if (ret) {
1143 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
1144 "0x%"HWADDR_PRIx", %p) = %d (%m)",
1145 container, iova, end - iova, vaddr, ret);
1149 static void vfio_listener_region_del(MemoryListener *listener,
1150 MemoryRegionSection *section)
1152 VFIOContainer *container = container_of(listener, VFIOContainer,
1153 iommu_data.listener);
1154 hwaddr iova, end;
1155 int ret;
1157 if (vfio_listener_skipped_section(section)) {
1158 DPRINTF("vfio: SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
1159 section->offset_within_address_space,
1160 section->offset_within_address_space + section->size - 1);
1161 return;
1164 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
1165 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
1166 error_report("%s received unaligned region", __func__);
1167 return;
1170 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
1171 end = (section->offset_within_address_space + section->size) &
1172 TARGET_PAGE_MASK;
1174 if (iova >= end) {
1175 return;
1178 DPRINTF("vfio: region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
1179 iova, end - 1);
1181 ret = vfio_dma_unmap(container, iova, end - iova);
1182 if (ret) {
1183 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
1184 "0x%"HWADDR_PRIx") = %d (%m)",
1185 container, iova, end - iova, ret);
1189 static MemoryListener vfio_memory_listener = {
1190 .region_add = vfio_listener_region_add,
1191 .region_del = vfio_listener_region_del,
1194 static void vfio_listener_release(VFIOContainer *container)
1196 memory_listener_unregister(&container->iommu_data.listener);
1200 * Interrupt setup
1202 static void vfio_disable_interrupts(VFIODevice *vdev)
1204 switch (vdev->interrupt) {
1205 case VFIO_INT_INTx:
1206 vfio_disable_intx(vdev);
1207 break;
1208 case VFIO_INT_MSI:
1209 vfio_disable_msi(vdev);
1210 break;
1211 case VFIO_INT_MSIX:
1212 vfio_disable_msix(vdev);
1213 break;
1217 static int vfio_setup_msi(VFIODevice *vdev, int pos)
1219 uint16_t ctrl;
1220 bool msi_64bit, msi_maskbit;
1221 int ret, entries;
1223 if (pread(vdev->fd, &ctrl, sizeof(ctrl),
1224 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
1225 return -errno;
1227 ctrl = le16_to_cpu(ctrl);
1229 msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
1230 msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
1231 entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
1233 DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
1234 vdev->host.bus, vdev->host.slot, vdev->host.function, pos);
1236 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
1237 if (ret < 0) {
1238 if (ret == -ENOTSUP) {
1239 return 0;
1241 error_report("vfio: msi_init failed");
1242 return ret;
1244 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
1246 return 0;
1250 * We don't have any control over how pci_add_capability() inserts
1251 * capabilities into the chain. In order to setup MSI-X we need a
1252 * MemoryRegion for the BAR. In order to setup the BAR and not
1253 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1254 * need to first look for where the MSI-X table lives. So we
1255 * unfortunately split MSI-X setup across two functions.
1257 static int vfio_early_setup_msix(VFIODevice *vdev)
1259 uint8_t pos;
1260 uint16_t ctrl;
1261 uint32_t table, pba;
1263 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
1264 if (!pos) {
1265 return 0;
1268 if (pread(vdev->fd, &ctrl, sizeof(ctrl),
1269 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
1270 return -errno;
1273 if (pread(vdev->fd, &table, sizeof(table),
1274 vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
1275 return -errno;
1278 if (pread(vdev->fd, &pba, sizeof(pba),
1279 vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
1280 return -errno;
1283 ctrl = le16_to_cpu(ctrl);
1284 table = le32_to_cpu(table);
1285 pba = le32_to_cpu(pba);
1287 vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
1288 vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
1289 vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
1290 vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
1291 vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
1292 vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
1294 DPRINTF("%04x:%02x:%02x.%x "
1295 "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
1296 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1297 vdev->host.function, pos, vdev->msix->table_bar,
1298 vdev->msix->table_offset, vdev->msix->entries);
1300 return 0;
1303 static int vfio_setup_msix(VFIODevice *vdev, int pos)
1305 int ret;
1307 ret = msix_init(&vdev->pdev, vdev->msix->entries,
1308 &vdev->bars[vdev->msix->table_bar].mem,
1309 vdev->msix->table_bar, vdev->msix->table_offset,
1310 &vdev->bars[vdev->msix->pba_bar].mem,
1311 vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
1312 if (ret < 0) {
1313 if (ret == -ENOTSUP) {
1314 return 0;
1316 error_report("vfio: msix_init failed");
1317 return ret;
1320 return 0;
1323 static void vfio_teardown_msi(VFIODevice *vdev)
1325 msi_uninit(&vdev->pdev);
1327 if (vdev->msix) {
1328 msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
1329 &vdev->bars[vdev->msix->pba_bar].mem);
1334 * Resource setup
1336 static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
1338 int i;
1340 for (i = 0; i < PCI_ROM_SLOT; i++) {
1341 VFIOBAR *bar = &vdev->bars[i];
1343 if (!bar->size) {
1344 continue;
1347 memory_region_set_enabled(&bar->mmap_mem, enabled);
1348 if (vdev->msix && vdev->msix->table_bar == i) {
1349 memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
1354 static void vfio_unmap_bar(VFIODevice *vdev, int nr)
1356 VFIOBAR *bar = &vdev->bars[nr];
1358 if (!bar->size) {
1359 return;
1362 memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
1363 munmap(bar->mmap, memory_region_size(&bar->mmap_mem));
1365 if (vdev->msix && vdev->msix->table_bar == nr) {
1366 memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
1367 munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
1370 memory_region_destroy(&bar->mem);
1373 static int vfio_mmap_bar(VFIOBAR *bar, MemoryRegion *mem, MemoryRegion *submem,
1374 void **map, size_t size, off_t offset,
1375 const char *name)
1377 int ret = 0;
1379 if (size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
1380 int prot = 0;
1382 if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
1383 prot |= PROT_READ;
1386 if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
1387 prot |= PROT_WRITE;
1390 *map = mmap(NULL, size, prot, MAP_SHARED,
1391 bar->fd, bar->fd_offset + offset);
1392 if (*map == MAP_FAILED) {
1393 *map = NULL;
1394 ret = -errno;
1395 goto empty_region;
1398 memory_region_init_ram_ptr(submem, name, size, *map);
1399 } else {
1400 empty_region:
1401 /* Create a zero sized sub-region to make cleanup easy. */
1402 memory_region_init(submem, name, 0);
1405 memory_region_add_subregion(mem, offset, submem);
1407 return ret;
1410 static void vfio_map_bar(VFIODevice *vdev, int nr)
1412 VFIOBAR *bar = &vdev->bars[nr];
1413 unsigned size = bar->size;
1414 char name[64];
1415 uint32_t pci_bar;
1416 uint8_t type;
1417 int ret;
1419 /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
1420 if (!size) {
1421 return;
1424 snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
1425 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1426 vdev->host.function, nr);
1428 /* Determine what type of BAR this is for registration */
1429 ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
1430 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
1431 if (ret != sizeof(pci_bar)) {
1432 error_report("vfio: Failed to read BAR %d (%m)", nr);
1433 return;
1436 pci_bar = le32_to_cpu(pci_bar);
1437 type = pci_bar & (pci_bar & PCI_BASE_ADDRESS_SPACE_IO ?
1438 ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK);
1440 /* A "slow" read/write mapping underlies all BARs */
1441 memory_region_init_io(&bar->mem, &vfio_bar_ops, bar, name, size);
1442 pci_register_bar(&vdev->pdev, nr, type, &bar->mem);
1445 * We can't mmap areas overlapping the MSIX vector table, so we
1446 * potentially insert a direct-mapped subregion before and after it.
1448 if (vdev->msix && vdev->msix->table_bar == nr) {
1449 size = vdev->msix->table_offset & TARGET_PAGE_MASK;
1452 strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
1453 if (vfio_mmap_bar(bar, &bar->mem,
1454 &bar->mmap_mem, &bar->mmap, size, 0, name)) {
1455 error_report("%s unsupported. Performance may be slow", name);
1458 if (vdev->msix && vdev->msix->table_bar == nr) {
1459 unsigned start;
1461 start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
1462 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
1464 size = start < bar->size ? bar->size - start : 0;
1465 strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
1466 /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
1467 if (vfio_mmap_bar(bar, &bar->mem, &vdev->msix->mmap_mem,
1468 &vdev->msix->mmap, size, start, name)) {
1469 error_report("%s unsupported. Performance may be slow", name);
1474 static void vfio_map_bars(VFIODevice *vdev)
1476 int i;
1478 for (i = 0; i < PCI_ROM_SLOT; i++) {
1479 vfio_map_bar(vdev, i);
1483 static void vfio_unmap_bars(VFIODevice *vdev)
1485 int i;
1487 for (i = 0; i < PCI_ROM_SLOT; i++) {
1488 vfio_unmap_bar(vdev, i);
1493 * General setup
1495 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1497 uint8_t tmp, next = 0xff;
1499 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
1500 tmp = pdev->config[tmp + 1]) {
1501 if (tmp > pos && tmp < next) {
1502 next = tmp;
1506 return next - pos;
1509 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1511 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1514 static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
1515 uint16_t val, uint16_t mask)
1517 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1518 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1519 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1522 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1524 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1527 static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
1528 uint32_t val, uint32_t mask)
1530 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1531 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1532 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
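/*
 * Mangle the express capability to match the type of bus we expose the
 * device on: leave it as-is on conventional PCI, turn endpoints into
 * Root Complex Integrated Endpoints (hiding the link registers) on a
 * root bus, and elsewhere turn integrated endpoints back into regular
 * endpoints, synthesizing the minimal link fields (x1, 2.5GT/s) they
 * otherwise lack.
 */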
1535 static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
1537 uint16_t flags;
1538 uint8_t type;
1540 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1541 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1543 if (type != PCI_EXP_TYPE_ENDPOINT &&
1544 type != PCI_EXP_TYPE_LEG_END &&
1545 type != PCI_EXP_TYPE_RC_END) {
1547 error_report("vfio: Assignment of PCIe type 0x%x "
1548 "devices is not currently supported", type);
1549 return -EINVAL;
1552 if (!pci_bus_is_express(vdev->pdev.bus)) {
1554 * Use express capability as-is on PCI bus. It doesn't make much
1555 * sense to even expose it, but some drivers (e.g. tg3) depend on it
1556 * and guests don't seem to be particular about it. We'll need
1557 * to revisit this or force express devices to express buses if we
1558 * ever expose an IOMMU to the guest.
1560 } else if (pci_bus_is_root(vdev->pdev.bus)) {
1562 * On a Root Complex bus Endpoints become Root Complex Integrated
1563 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1565 if (type == PCI_EXP_TYPE_ENDPOINT) {
1566 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1567 PCI_EXP_TYPE_RC_END << 4,
1568 PCI_EXP_FLAGS_TYPE);
1570 /* Link Capabilities, Status, and Control go away */
1571 if (size > PCI_EXP_LNKCTL) {
1572 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1573 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1574 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1576 #ifndef PCI_EXP_LNKCAP2
1577 #define PCI_EXP_LNKCAP2 44
1578 #endif
1579 #ifndef PCI_EXP_LNKSTA2
1580 #define PCI_EXP_LNKSTA2 50
1581 #endif
1582 /* Link 2 Capabilities, Status, and Control go away */
1583 if (size > PCI_EXP_LNKCAP2) {
1584 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1585 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1586 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1590 } else if (type == PCI_EXP_TYPE_LEG_END) {
1592 * Legacy endpoints don't belong on the root complex. Windows
1593 * seems to be happier with devices if we skip the capability.
1595 return 0;
1598 } else {
1600 * Convert Root Complex Integrated Endpoints to regular endpoints.
1601 * These devices don't support LNK/LNK2 capabilities, so make them up.
1603 if (type == PCI_EXP_TYPE_RC_END) {
1604 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1605 PCI_EXP_TYPE_ENDPOINT << 4,
1606 PCI_EXP_FLAGS_TYPE);
1607 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
1608 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
1609 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1612 /* Mark the Link Status bits as emulated to allow virtual negotiation */
1613 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
1614 pci_get_word(vdev->pdev.config + pos +
1615 PCI_EXP_LNKSTA),
1616 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
1619 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
1620 if (pos >= 0) {
1621 vdev->pdev.exp.exp_cap = pos;
1624 return pos;
1627 static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
1629 PCIDevice *pdev = &vdev->pdev;
1630 uint8_t cap_id, next, size;
1631 int ret;
1633 cap_id = pdev->config[pos];
1634 next = pdev->config[pos + 1];
1637 * If it becomes important to configure capabilities to their actual
1638 * size, use this as the default when it's something we don't recognize.
1639 * Since QEMU doesn't actually handle many of the config accesses,
1640 * exact size doesn't seem worthwhile.
1642 size = vfio_std_cap_max_size(pdev, pos);
1645 * pci_add_capability always inserts the new capability at the head
1646 * of the chain. Therefore to end up with a chain that matches the
1647 * physical device, we insert from the end by making this recursive.
1648 * This is also why we pre-calculate size above as cached config space
1649 * will be changed as we unwind the stack.
1651 if (next) {
1652 ret = vfio_add_std_cap(vdev, next);
1653 if (ret) {
1654 return ret;
1656 } else {
1657 /* Begin the rebuild, use QEMU emulated list bits */
1658 pdev->config[PCI_CAPABILITY_LIST] = 0;
1659 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1660 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
1663 /* Use emulated next pointer to allow dropping caps */
1664 pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);
1666 switch (cap_id) {
1667 case PCI_CAP_ID_MSI:
1668 ret = vfio_setup_msi(vdev, pos);
1669 break;
1670 case PCI_CAP_ID_EXP:
1671 ret = vfio_setup_pcie_cap(vdev, pos, size);
1672 break;
1673 case PCI_CAP_ID_MSIX:
1674 ret = vfio_setup_msix(vdev, pos);
1675 break;
1676 default:
1677 ret = pci_add_capability(pdev, cap_id, pos, size);
1678 break;
1681 if (ret < 0) {
1682 error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
1683 "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
1684 vdev->host.bus, vdev->host.slot, vdev->host.function,
1685 cap_id, size, pos, ret);
1686 return ret;
1689 return 0;
1692 static int vfio_add_capabilities(VFIODevice *vdev)
1694 PCIDevice *pdev = &vdev->pdev;
1696 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
1697 !pdev->config[PCI_CAPABILITY_LIST]) {
1698 return 0; /* Nothing to add */
1701 return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
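/*
 * Copy the device ROM through the VFIO region into a RAM-backed PCI
 * ROM BAR, tolerating short reads and retrying on EINTR/EAGAIN.  Not
 * used when PCI loads the ROM from a file or the ROM BAR is disabled.
 */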
1704 static int vfio_load_rom(VFIODevice *vdev)
1706 uint64_t size = vdev->rom_size;
1707 char name[32];
1708 off_t off = 0, voff = vdev->rom_offset;
1709 ssize_t bytes;
1710 void *ptr;
1712 /* If loading ROM from file, pci handles it */
1713 if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
1714 return 0;
1717 DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
1718 vdev->host.bus, vdev->host.slot, vdev->host.function);
1720 snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
1721 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1722 vdev->host.function);
1723 memory_region_init_ram(&vdev->pdev.rom, name, size);
1724 ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
1725 memset(ptr, 0xff, size);
1727 while (size) {
1728 bytes = pread(vdev->fd, ptr + off, size, voff + off);
1729 if (bytes == 0) {
1730 break; /* expect that we could get back less than the ROM BAR */
1731 } else if (bytes > 0) {
1732 off += bytes;
1733 size -= bytes;
1734 } else {
1735 if (errno == EINTR || errno == EAGAIN) {
1736 continue;
1738 error_report("vfio: Error reading device ROM: %m");
1739 memory_region_destroy(&vdev->pdev.rom);
1740 return -errno;
1744 pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
1745 vdev->pdev.has_rom = true;
1746 return 0;
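/*
 * Attach a group to a container, preferring any existing container the
 * kernel lets it join via VFIO_GROUP_SET_CONTAINER.  A new container
 * gets an API version check, the type1 IOMMU, and the memory listener
 * that maps guest RAM for DMA.
 */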
1749 static int vfio_connect_container(VFIOGroup *group)
1751 VFIOContainer *container;
1752 int ret, fd;
1754 if (group->container) {
1755 return 0;
1758 QLIST_FOREACH(container, &container_list, next) {
1759 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
1760 group->container = container;
1761 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
1762 return 0;
1766 fd = qemu_open("/dev/vfio/vfio", O_RDWR);
1767 if (fd < 0) {
1768 error_report("vfio: failed to open /dev/vfio/vfio: %m");
1769 return -errno;
1772 ret = ioctl(fd, VFIO_GET_API_VERSION);
1773 if (ret != VFIO_API_VERSION) {
1774 error_report("vfio: supported vfio version: %d, "
1775 "reported version: %d", VFIO_API_VERSION, ret);
1776 close(fd);
1777 return -EINVAL;
1780 container = g_malloc0(sizeof(*container));
1781 container->fd = fd;
1783 if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
1784 ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
1785 if (ret) {
1786 error_report("vfio: failed to set group container: %m");
1787 g_free(container);
1788 close(fd);
1789 return -errno;
1792 ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
1793 if (ret) {
1794 error_report("vfio: failed to set iommu for container: %m");
1795 g_free(container);
1796 close(fd);
1797 return -errno;
1800 container->iommu_data.listener = vfio_memory_listener;
1801 container->iommu_data.release = vfio_listener_release;
1803 memory_listener_register(&container->iommu_data.listener, &address_space_memory);
1804 } else {
1805 error_report("vfio: No available IOMMU models");
1806 g_free(container);
1807 close(fd);
1808 return -EINVAL;
1811 QLIST_INIT(&container->group_list);
1812 QLIST_INSERT_HEAD(&container_list, container, next);
1814 group->container = container;
1815 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
1817 return 0;
1820 static void vfio_disconnect_container(VFIOGroup *group)
1822 VFIOContainer *container = group->container;
1824 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
1825 error_report("vfio: error disconnecting group %d from container",
1826 group->groupid);
1829 QLIST_REMOVE(group, container_next);
1830 group->container = NULL;
1832 if (QLIST_EMPTY(&container->group_list)) {
1833 if (container->iommu_data.release) {
1834 container->iommu_data.release(container);
1836 QLIST_REMOVE(container, next);
1837 DPRINTF("vfio_disconnect_container: close container->fd\n");
1838 close(container->fd);
1839 g_free(container);
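/*
 * Look up or create the VFIOGroup for an IOMMU group number: open
 * /dev/vfio/$groupid, require the group to be viable (every device in
 * it bound to a vfio bus driver), then connect it to a container.
 */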
1843 static VFIOGroup *vfio_get_group(int groupid)
1845 VFIOGroup *group;
1846 char path[32];
1847 struct vfio_group_status status = { .argsz = sizeof(status) };
1849 QLIST_FOREACH(group, &group_list, next) {
1850 if (group->groupid == groupid) {
1851 return group;
1855 group = g_malloc0(sizeof(*group));
1857 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
1858 group->fd = qemu_open(path, O_RDWR);
1859 if (group->fd < 0) {
1860 error_report("vfio: error opening %s: %m", path);
1861 g_free(group);
1862 return NULL;
1865 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
1866 error_report("vfio: error getting group status: %m");
1867 close(group->fd);
1868 g_free(group);
1869 return NULL;
1872 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
1873 error_report("vfio: error, group %d is not viable, please ensure "
1874 "all devices within the iommu_group are bound to their "
1875 "vfio bus driver.", groupid);
1876 close(group->fd);
1877 g_free(group);
1878 return NULL;
1881 group->groupid = groupid;
1882 QLIST_INIT(&group->device_list);
1884 if (vfio_connect_container(group)) {
1885 error_report("vfio: failed to setup container for group %d", groupid);
1886 close(group->fd);
1887 g_free(group);
1888 return NULL;
1891 QLIST_INSERT_HEAD(&group_list, group, next);
1893 return group;
1896 static void vfio_put_group(VFIOGroup *group)
1898 if (!QLIST_EMPTY(&group->device_list)) {
1899 return;
1902 vfio_disconnect_container(group);
1903 QLIST_REMOVE(group, next);
1904 DPRINTF("vfio_put_group: close group->fd\n");
1905 close(group->fd);
1906 g_free(group);
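/*
 * Get the device fd from the group and sanity check it, caching the
 * size, offset, and flags of the BAR, ROM, and config space regions
 * for the mapping code below.
 */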
1909 static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
1911 struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
1912 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
1913 int ret, i;
1915 ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
1916 if (ret < 0) {
1917 error_report("vfio: error getting device %s from group %d: %m",
1918 name, group->groupid);
1919 error_printf("Verify all devices in group %d are bound to vfio-pci "
1920 "or pci-stub and not already in use\n", group->groupid);
1921 return ret;
1924 vdev->fd = ret;
1925 vdev->group = group;
1926 QLIST_INSERT_HEAD(&group->device_list, vdev, next);
1928 /* Sanity check device */
1929 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
1930 if (ret) {
1931 error_report("vfio: error getting device info: %m");
1932 goto error;
1935 DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
1936 dev_info.flags, dev_info.num_regions, dev_info.num_irqs);
1938 if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
1939 error_report("vfio: Um, this isn't a PCI device");
1940 goto error;
1943 vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
1944 if (!vdev->reset_works) {
1945 error_report("Warning, device %s does not support reset", name);
1948 if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
1949 error_report("vfio: unexpected number of io regions %u",
1950 dev_info.num_regions);
1951 goto error;
1954 if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
1955 error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
1956 goto error;
1959 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
1960 reg_info.index = i;
1962 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
1963 if (ret) {
1964 error_report("vfio: Error getting region %d info: %m", i);
1965 goto error;
1968 DPRINTF("Device %s region %d:\n", name, i);
1969 DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
1970 (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
1971 (unsigned long)reg_info.flags);
1973 vdev->bars[i].flags = reg_info.flags;
1974 vdev->bars[i].size = reg_info.size;
1975 vdev->bars[i].fd_offset = reg_info.offset;
1976 vdev->bars[i].fd = vdev->fd;
1977 vdev->bars[i].nr = i;
1980 reg_info.index = VFIO_PCI_ROM_REGION_INDEX;
1982 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
1983 if (ret) {
1984 error_report("vfio: Error getting ROM info: %m");
1985 goto error;
1988 DPRINTF("Device %s ROM:\n", name);
1989 DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
1990 (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
1991 (unsigned long)reg_info.flags);
1993 vdev->rom_size = reg_info.size;
1994 vdev->rom_offset = reg_info.offset;
1996 reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;
1998 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
1999 if (ret) {
2000 error_report("vfio: Error getting config info: %m");
2001 goto error;
2004 DPRINTF("Device %s config:\n", name);
2005 DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
2006 (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
2007 (unsigned long)reg_info.flags);
2009 vdev->config_size = reg_info.size;
2010 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2011 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2013 vdev->config_offset = reg_info.offset;
2015 error:
2016 if (ret) {
2017 QLIST_REMOVE(vdev, next);
2018 vdev->group = NULL;
2019 close(vdev->fd);
2021 return ret;
static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
    if (vdev->msix) {
        g_free(vdev->msix);
        vdev->msix = NULL;
    }
}
static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }
    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    /* Leave room for the NUL terminator we append below */
    len = readlink(path, iommu_group_path, sizeof(iommu_group_path) - 1);
    if (len <= 0) {
        error_report("vfio: error: no iommu_group for device");
        return -errno;
    }

    iommu_group_path[len] = 0;
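    /*
     * The iommu_group link points at the group's sysfs node, e.g.
     * ../../../kernel/iommu_groups/26; its final component is the group
     * number we need in order to open the group under /dev/vfio/.
     */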
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }
    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
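    /*
     * A group may contain several devices; reject a second attempt to
     * attach the same host device to this VM.
     */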
    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }
    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
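    /*
     * emulated_config_bits is a per-bit mask over config space: a set bit
     * means vfio_pci_read_config() serves that bit from QEMU's emulated
     * copy (vdev->pdev.config), a clear bit means it is read from the
     * physical device.
     */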
    vfio_load_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }
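    /*
     * Guest accesses to the MSI/MSI-X capabilities must never reach the
     * physical device: QEMU's MSI/MSI-X code handles them, and interrupts
     * are delivered through eventfds wired up via VFIO_DEVICE_SET_IRQS,
     * so both capability ranges are marked fully emulated.
     */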
    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }
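    /*
     * A non-zero Interrupt Pin register means the device can assert INTx.
     * Because INTx is level triggered, BAR mmaps are disabled while an
     * interrupt is pending so that guest BAR accesses trap into QEMU and
     * can be treated as servicing the interrupt; the timer re-enables
     * mmaps after x-intx-mmap-timeout-ms without further interrupts
     * (see VFIOINTx).
     */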
    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    return 0;
out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        qemu_free_timer(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
}
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint16_t cmd;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_disable_interrupts(vdev);

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);

    if (vdev->reset_works) {
        if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
            error_report("vfio: Error unable to reset physical device "
                         "(%04x:%02x:%02x.%x): %m", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
    }
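    /*
     * VFIO_DEVICE_RESET is only attempted when the kernel advertised
     * VFIO_DEVICE_FLAGS_RESET (cached in reset_works).  INTx is re-armed
     * afterwards since the command register above was rewritten with
     * INTx Disable cleared.
     */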
    vfio_enable_intx(vdev);
}
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
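/*
 * Typical usage (the host property takes [domain:]bus:slot.function):
 *   -device vfio-pci,host=0000:06:0d.0
 * x-intx-mmap-timeout-ms tunes the delay before BAR mmaps are re-enabled
 * after an INTx interrupt; the default is 1100ms.
 */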
static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};
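/*
 * The vmstate above is marked unmigratable because the device state
 * lives in physical hardware, which QEMU cannot serialize.
 */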
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
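    /*
     * Whether the assigned device is really PCI Express is only known at
     * init time, so the class claims express here and vfio_get_device()
     * clears QEMU_PCI_CAP_EXPRESS for devices exposing only 256 bytes of
     * config space.
     */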
    pdc->is_express = 1; /* We might be */
}
static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
};
static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)