hw/vfio/pci: use name field in format strings
[qemu.git] / hw/vfio/pci.c
blob 423d9bb9da91124a406cac735e0d64c1f250bc29
/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <dirent.h>
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "config.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "hw/vfio/vfio.h"

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1
#define VFIO_ALLOW_KVM_MSI 1
#define VFIO_ALLOW_KVM_MSIX 1

enum {
    VFIO_DEVICE_TYPE_PCI = 0,
};

struct VFIOPCIDevice;

typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIOPCIDevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;
        uint32_t address_match;
        uint32_t address_mask;
        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;
        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;

typedef struct VFIORegion {
    struct VFIODevice *vbasedev;
    off_t fd_offset; /* offset of region within device fd */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the region number for debug */
} VFIORegion;

typedef struct VFIOBAR {
    VFIORegion region;
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;
typedef struct VFIOMSIVector {
    /*
     * Two interrupt paths are configured per vector.  The first is used only
     * for interrupts injected via QEMU; this is typically the non-accelerated
     * path, but may also be used when we want QEMU to handle masking and
     * pending bits.  The KVM path bypasses QEMU and is therefore higher
     * performance, but requires masking at the device.  virq is used to track
     * the MSI route through KVM, thus kvm_interrupt is only available when
     * virq is set to a valid (>= 0) value.
     */
    EventNotifier interrupt;
    EventNotifier kvm_interrupt;
    struct VFIOPCIDevice *vdev; /* back pointer to device */
    int virq;
    bool use;
} VFIOMSIVector;
enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI = 2,
    VFIO_INT_MSIX = 3,
};

typedef struct VFIOAddressSpace {
    AddressSpace *as;
    QLIST_HEAD(, VFIOContainer) containers;
    QLIST_ENTRY(VFIOAddressSpace) list;
} VFIOAddressSpace;

static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

struct VFIOGroup;

typedef struct VFIOType1 {
    MemoryListener listener;
    int error;
    bool initialized;
} VFIOType1;

typedef struct VFIOContainer {
    VFIOAddressSpace *space;
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            VFIOType1 type1;
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;

typedef struct VFIOGuestIOMMU {
    VFIOContainer *container;
    MemoryRegion *iommu;
    Notifier n;
    QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
} VFIOGuestIOMMU;

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIODeviceOps VFIODeviceOps;

typedef struct VFIODevice {
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    char *name;
    int fd;
    int type;
    bool reset_works;
    bool needs_reset;
    VFIODeviceOps *ops;
    unsigned int num_irqs;
    unsigned int num_regions;
    unsigned int flags;
} VFIODevice;

struct VFIODeviceOps {
    void (*vfio_compute_needs_reset)(VFIODevice *vdev);
    int (*vfio_hot_reset_multi)(VFIODevice *vdev);
    void (*vfio_eoi)(VFIODevice *vdev);
    int (*vfio_populate_device)(VFIODevice *vdev);
};

typedef struct VFIOPCIDevice {
    PCIDevice pdev;
    VFIODevice vbasedev;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    EventNotifier err_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool has_vga;
    bool pci_aer;
    bool has_flr;
    bool has_pm_reset;
    bool rom_read_failed;
} VFIOPCIDevice;

typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

typedef struct VFIORomBlacklistEntry {
    uint16_t vendor_id;
    uint16_t device_id;
} VFIORomBlacklistEntry;
/*
 * List of device ids/vendor ids for which to disable
 * option rom loading.  This avoids guest hangs during rom
 * execution, as noticed with the BCM 57810 card, for lack of a
 * better way to handle such issues.
 * The user can still override by specifying a romfile or
 * rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang.  When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue.
 */
static const VFIORomBlacklistEntry romblacklist[] = {
    /* Broadcom BCM 57810 */
    { 0x14e4, 0x168e }
};
#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOGroup)
    vfio_group_list = QLIST_HEAD_INITIALIZER(vfio_group_list);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
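
/*
 * For reference (the creation code is outside this excerpt): such a pseudo
 * device is typically created lazily via KVM_CREATE_DEVICE the first time a
 * group is attached.  A minimal sketch, assuming a valid KVM VM fd:
 *
 *     struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *
 *     if (!ioctl(vm_fd, KVM_CREATE_DEVICE, &cd)) {
 *         vfio_kvm_device_fd = cd.fd;  // cached for the life of the VM
 *     }
 *
 * Groups are then registered and unregistered through the device's
 * KVM_DEV_VFIO_GROUP_ADD/KVM_DEV_VFIO_GROUP_DEL attributes.
 */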
static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
static void vfio_put_base_device(VFIODevice *vbasedev);
static int vfio_populate_device(VFIODevice *vbasedev);

/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * INTx
 */
static void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif
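
/*
 * The three helpers above use the fixed-size form of struct vfio_irq_set
 * (VFIO_IRQ_SET_DATA_NONE).  The VFIO_IRQ_SET_DATA_EVENTFD form used
 * throughout the rest of this file carries a variable-length array of
 * eventfds after the header, so it is heap-allocated.  A sketch of that
 * recurring pattern:
 *
 *     argsz = sizeof(*irq_set) + count * sizeof(int32_t);
 *     irq_set = g_malloc0(argsz);
 *     irq_set->argsz = argsz;
 *     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *     ...fill index/start/count and the eventfd array...
 *     ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
 *     g_free(irq_set);
 */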
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
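
/*
 * A sketch of how this timer is presumably wired up at device init (the
 * init code is outside this excerpt); timer_new_ms() arms nothing until
 * the first timer_mod() above:
 *
 *     vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
 *                                          vfio_intx_mmap_enable, vdev);
 */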
static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}
static void vfio_enable_intx_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_enable_intx_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_disable_intx_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_disable_intx_kvm(vdev->vbasedev.name);
#endif
}
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_update_irq(vdev->vbasedev.name,
                          vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(&vdev->vbasedev);
}
static int vfio_enable_intx(VFIOPCIDevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        /* pfd pointed into irq_set, which is freed above; re-derive the fd */
        qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                            NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_enable_intx(vdev->vbasedev.name);

    return 0;
}
static void vfio_disable_intx(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_disable_intx(vdev->vbasedev.name);
}
/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

#ifdef DEBUG_VFIO
    MSIMessage msg;

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msg = msix_get_message(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msg = msi_get_message(&vdev->pdev, nr);
    } else {
        abort();
    }

    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
#endif

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when it is
         * set up.  MSI-X mask and pending bits are emulated, so we want to
         * use the KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
static void vfio_add_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage *msg,
                                  bool msix)
{
    int virq;

    if ((msix && !VFIO_ALLOW_KVM_MSIX) ||
        (!msix && !VFIO_ALLOW_KVM_MSI) || !msg) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, *msg);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
                                       NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
                                      vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
}
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg);
        }
    } else {
        vfio_add_kvm_msi_virq(vector, msg, true);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}
static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}
static void vfio_enable_msix(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_enable_msix(vdev->vbasedev.name);
}
static void vfio_enable_msi(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg = msi_get_message(&vdev->pdev, i);

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vector, &msg, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_enable_msi(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_disable_msi_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}
static void vfio_disable_msix(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    trace_vfio_disable_msix(vdev->vbasedev.name);
}

static void vfio_disable_msi(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_disable_msi_common(vdev);

    trace_vfio_disable_msi(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg);
    }
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_region_write(void *opaque, hwaddr addr,
                              uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

static uint64_t vfio_region_read(void *opaque,
                                 hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

static const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
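
/*
 * For orientation, a sketch of how vfio_region_ops would typically be bound
 * to a region's slow-path MemoryRegion when the BAR is mapped (the mapping
 * code is outside this excerpt), mirroring the memory_region_init_io() calls
 * used for the quirks below:
 *
 *     memory_region_init_io(&region->mem, OBJECT(vdev), &vfio_region_ops,
 *                           region, name, size);
 */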
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info reg_info = {
        .argsz = sizeof(reg_info),
        .index = VFIO_PCI_ROM_REGION_INDEX
    };
    uint64_t size;
    off_t off = 0;
    ssize_t bytes; /* signed: pread() returns -1 on error */

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info.size,
                            (unsigned long)reg_info.offset,
                            (unsigned long)reg_info.flags);

    vdev->rom_size = size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }
}
static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    /* Writes to the virtual ROM BAR are silently ignored */
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static bool vfio_blacklist_opt_rom(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t vendor_id, device_id;
    int count = 0;

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);

    while (count < ARRAY_SIZE(romblacklist)) {
        if (romblacklist[count].vendor_id == vendor_id &&
            romblacklist[count].device_id == device_id) {
            return true;
        }
        count++;
    }

    return false;
}
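
/*
 * As the blacklist comment above notes, the user can still force ROM loading
 * for a blacklisted device.  Hypothetical command lines (the host address is
 * an example; rombar= and romfile= are standard PCI device options):
 *
 *     -device vfio-pci,host=05:00.0,rombar=1
 *     -device vfio-pci,host=05:00.0,romfile=/path/to/rom.bin
 */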
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char name[32];
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning : Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%04x:%02x:%02x.%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning : Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified non zero value for "
                         "rombar\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        } else {
            error_printf("Warning : Rom loading for device at "
                         "%04x:%02x:%02x.%x has been disabled due to "
                         "system instability issues. "
                         "Specify rombar=1 or romfile to force\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}
static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2)
{
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}
static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        trace_vfio_generic_window_quirk_read(memory_region_name(&quirk->mem),
                                             vdev->vbasedev.name,
                                             quirk->data.bar,
                                             addr, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + quirk->data.base_offset, size);
    }

    return data;
}

static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        trace_vfio_generic_window_quirk_write(memory_region_name(&quirk->mem),
                                              vdev->vbasedev.name,
                                              quirk->data.bar,
                                              addr, data, size);
        return;
    }

    vfio_region_write(&vdev->bars[quirk->data.bar].region,
                      addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        trace_vfio_generic_quirk_read(memory_region_name(&quirk->mem),
                                      vdev->vbasedev.name, quirk->data.bar,
                                      addr + base, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + base, size);
    }

    return data;
}

static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        trace_vfio_generic_quirk_write(memory_region_name(&quirk->mem),
                                       vdev->vbasedev.name, quirk->data.bar,
                                       addr + base, data, size);
    } else {
        vfio_region_write(&vdev->bars[quirk->data.bar].region,
                          addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    trace_vfio_ati_3c3_quirk_read(data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_vga_probe_ati_3c3_quirk(vdev->vbasedev.name);
}
/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access is provided, but we drop writes when the window
 * is enabled to config space nonetheless.
 */
static void vfio_probe_ati_bar4_window_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        quirk->data.base_offset, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_ati_bar4_window_quirk(vdev->vbasedev.name);
}
#define PCI_VENDOR_ID_REALTEK 0x10ec

/*
 * RTL8168 devices have a backdoor that can access the MSI-X table.  At BAR2
 * offset 0x70 there is a dword data register, offset 0x74 is a dword address
 * register.  According to the Linux r8169 driver, the MSI-X table is addressed
 * when the "type" portion of the address register is set to 0x1.  This appears
 * to be bits 16:30.  Bit 31 is both a write indicator and some sort of
 * "address latched" indicator.  Bits 12:15 are a mask field, which we can
 * ignore because the MSI-X table should always be accessed as a dword (full
 * mask).  Bits 0:11 are the offset within the type.
 *
 * Example trace:
 *
 * Read from MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
 *
 * Write 0xfee00000 to MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
 */
static uint64_t vfio_rtl8168_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    switch (addr) {
    case 4: /* address */
        if (quirk->data.flags) {
            trace_vfio_rtl8168_window_quirk_read_fake(
                memory_region_name(&quirk->mem),
                vdev->vbasedev.name);

            return quirk->data.address_match ^ 0x10000000U;
        }
        break;
    case 0: /* data */
        if (quirk->data.flags) {
            uint64_t val;

            trace_vfio_rtl8168_window_quirk_read_table(
                memory_region_name(&quirk->mem),
                vdev->vbasedev.name);

            if (!(vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
                return 0;
            }

            io_mem_read(&vdev->pdev.msix_table_mmio,
                        (hwaddr)(quirk->data.address_match & 0xfff),
                        &val, size);
            return val;
        }
    }

    trace_vfio_rtl8168_window_quirk_read_direct(memory_region_name(&quirk->mem),
                                                vdev->vbasedev.name);

    return vfio_region_read(&vdev->bars[quirk->data.bar].region,
                            addr + 0x70, size);
}

static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    switch (addr) {
    case 4: /* address */
        if ((data & 0x7fff0000) == 0x10000) {
            if (data & 0x10000000U &&
                vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {

                trace_vfio_rtl8168_window_quirk_write_table(
                    memory_region_name(&quirk->mem),
                    vdev->vbasedev.name);

                io_mem_write(&vdev->pdev.msix_table_mmio,
                             (hwaddr)(quirk->data.address_match & 0xfff),
                             data, size);
            }

            quirk->data.flags = 1;
            quirk->data.address_match = data;

            return;
        }
        quirk->data.flags = 0;
        break;
    case 0: /* data */
        quirk->data.address_mask = data;
        break;
    }

    trace_vfio_rtl8168_window_quirk_write_direct(
        memory_region_name(&quirk->mem),
        vdev->vbasedev.name);

    vfio_region_write(&vdev->bars[quirk->data.bar].region,
                      addr + 0x70, data, size);
}

static const MemoryRegionOps vfio_rtl8168_window_quirk = {
    .read = vfio_rtl8168_window_quirk_read,
    .write = vfio_rtl8168_window_quirk_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_rtl8168_bar2_window_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_REALTEK ||
        pci_get_word(pdev->config + PCI_DEVICE_ID) != 0x8168 || nr != 2) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_rtl8168_window_quirk,
                          quirk, "vfio-rtl8168-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        0x70, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_rtl8168_bar2_window_quirk(vdev->vbasedev.name);
}
/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_ati_bar2_4000_quirk(vdev->vbasedev.name);
}
/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
 */
enum {
    NV_3D0_NONE = 0,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};
static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + quirk->data.base_offset, size);

    if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
        data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
        trace_vfio_nvidia_3d0_quirk_read(size, data);
    }

    quirk->data.flags = NV_3D0_NONE;

    return data;
}

static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data.flags) {
    case NV_3D0_NONE:
        if (addr == quirk->data.address_offset && data == 0x338) {
            quirk->data.flags = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset &&
            (data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags = NV_3D0_WINDOW;
            quirk->data.address_val = data & quirk->data.address_mask;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.address_offset) {
            if (data == 0x538) {
                quirk->data.flags = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data.flags = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset) {
            vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
            trace_vfio_nvidia_3d0_quirk_write(data, size);
            return;
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
2007 static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
2009 PCIDevice *pdev = &vdev->pdev;
2010 VFIOQuirk *quirk;
2012 if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
2013 !vdev->bars[1].region.size) {
2014 return;
2017 quirk = g_malloc0(sizeof(*quirk));
2018 quirk->vdev = vdev;
2019 quirk->data.base_offset = 0x10;
2020 quirk->data.address_offset = 4;
2021 quirk->data.address_size = 2;
2022 quirk->data.address_match = 0x1800;
2023 quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
2024 quirk->data.data_offset = 0;
2025 quirk->data.data_size = 4;
2027 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
2028 quirk, "vfio-nvidia-3d0-quirk", 6);
2029 memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
2030 quirk->data.base_offset, &quirk->mem);
2032 QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
2033 quirk, next);
2035 trace_vfio_vga_probe_nvidia_3d0_quirk(vdev->vbasedev.name);
2039 * The second quirk is documented in envytools. The I/O port BAR5 is just
2040 * a set of address/data ports to the MMIO BARs. The BAR we care about is
2041 * again BAR0. This backdoor is apparently a bit newer than the one above,
2042 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
2043 * space: the extended space is available through the 4k window @0x88000.
2045 enum {
2046 NV_BAR5_ADDRESS = 0x1,
2047 NV_BAR5_ENABLE = 0x2,
2048 NV_BAR5_MASTER = 0x4,
2049 NV_BAR5_VALID = 0x7,
2052 static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
2053 uint64_t data, unsigned size)
2055 VFIOQuirk *quirk = opaque;
2057 switch (addr) {
2058 case 0x0:
2059 if (data & 0x1) {
2060 quirk->data.flags |= NV_BAR5_MASTER;
2061 } else {
2062 quirk->data.flags &= ~NV_BAR5_MASTER;
2064 break;
2065 case 0x4:
2066 if (data & 0x1) {
2067 quirk->data.flags |= NV_BAR5_ENABLE;
2068 } else {
2069 quirk->data.flags &= ~NV_BAR5_ENABLE;
2071 break;
2072 case 0x8:
2073 if (quirk->data.flags & NV_BAR5_MASTER) {
2074 if ((data & ~0xfff) == 0x88000) {
2075 quirk->data.flags |= NV_BAR5_ADDRESS;
2076 quirk->data.address_val = data & 0xfff;
2077 } else if ((data & ~0xff) == 0x1800) {
2078 quirk->data.flags |= NV_BAR5_ADDRESS;
2079 quirk->data.address_val = data & 0xff;
2080 } else {
2081 quirk->data.flags &= ~NV_BAR5_ADDRESS;
2084 break;
2087 vfio_generic_window_quirk_write(opaque, addr, data, size);
2090 static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
2091 .read = vfio_generic_window_quirk_read,
2092 .write = vfio_nvidia_bar5_window_quirk_write,
2093 .valid.min_access_size = 4,
2094 .endianness = DEVICE_LITTLE_ENDIAN,
2097 static void vfio_probe_nvidia_bar5_window_quirk(VFIOPCIDevice *vdev, int nr)
2099 PCIDevice *pdev = &vdev->pdev;
2100 VFIOQuirk *quirk;
2102 if (!vdev->has_vga || nr != 5 ||
2103 pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
2104 return;
2107 quirk = g_malloc0(sizeof(*quirk));
2108 quirk->vdev = vdev;
2109 quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
2110 quirk->data.address_offset = 0x8;
2111 quirk->data.address_size = 0; /* actually 4, but avoids generic code */
2112 quirk->data.data_offset = 0xc;
2113 quirk->data.data_size = 4;
2114 quirk->data.bar = nr;
2116 memory_region_init_io(&quirk->mem, OBJECT(vdev),
2117 &vfio_nvidia_bar5_window_quirk, quirk,
2118 "vfio-nvidia-bar5-window-quirk", 16);
2119 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
2120 0, &quirk->mem, 1);
2122 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
2124 trace_vfio_probe_nvidia_bar5_window_quirk(vdev->vbasedev.name);
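/*
 * Illustrative only: a guest-side sketch of driving the BAR5 window
 * decoded above. It assumes BAR5 is an I/O port BAR based at `bar5`
 * (a hypothetical variable) and x86 Linux port I/O; all accesses are
 * 32-bit per valid.min_access_size.
 */
#if 0
#include <stdint.h>
#include <sys/io.h>

static uint32_t nv_bar5_read_config(uint16_t bar5, uint16_t reg)
{
    outl(0x1, bar5 + 0x0);            /* sets NV_BAR5_MASTER */
    outl(0x1, bar5 + 0x4);            /* sets NV_BAR5_ENABLE */
    outl(0x88000 + reg, bar5 + 0x8);  /* sets NV_BAR5_ADDRESS + offset */
    return inl(bar5 + 0xc);           /* data port, redirected to config */
}
#endif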
2127 static void vfio_nvidia_88000_quirk_write(void *opaque, hwaddr addr,
2128 uint64_t data, unsigned size)
2130 VFIOQuirk *quirk = opaque;
2131 VFIOPCIDevice *vdev = quirk->vdev;
2132 PCIDevice *pdev = &vdev->pdev;
2133 hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
2135 vfio_generic_quirk_write(opaque, addr, data, size);
2138 * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
2139 * MSI capability ID register. Both the ID and next register are
2140 * read-only, so we allow writes covering either of those to real hw.
2141 * NB - only fixed for the 0x88000 MMIO window.
2143 if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
2144 vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
2145 vfio_region_write(&vdev->bars[quirk->data.bar].region,
2146 addr + base, data, size);
2150 static const MemoryRegionOps vfio_nvidia_88000_quirk = {
2151 .read = vfio_generic_quirk_read,
2152 .write = vfio_nvidia_88000_quirk_write,
2153 .endianness = DEVICE_LITTLE_ENDIAN,
2157 * Finally, BAR0 itself. We want to redirect any accesses to either
2158 * 0x1800 or 0x88000 through the PCI config space access functions.
2160 * NB - the quirks must be at page granularity or else they don't seem
2161 * to work when the BARs are mmap'd
2163 * Here's offset 0x88000...
2165 static void vfio_probe_nvidia_bar0_88000_quirk(VFIOPCIDevice *vdev, int nr)
2167 PCIDevice *pdev = &vdev->pdev;
2168 VFIOQuirk *quirk;
2169 uint16_t vendor, class;
2171 vendor = pci_get_word(pdev->config + PCI_VENDOR_ID);
2172 class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
2174 if (nr != 0 || vendor != PCI_VENDOR_ID_NVIDIA ||
2175 class != PCI_CLASS_DISPLAY_VGA) {
2176 return;
2179 quirk = g_malloc0(sizeof(*quirk));
2180 quirk->vdev = vdev;
2181 quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
2182 quirk->data.address_match = 0x88000;
2183 quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
2184 quirk->data.bar = nr;
2186 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_88000_quirk,
2187 quirk, "vfio-nvidia-bar0-88000-quirk",
2188 TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
2189 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
2190 quirk->data.address_match & TARGET_PAGE_MASK,
2191 &quirk->mem, 1);
2193 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
2195 trace_vfio_probe_nvidia_bar0_88000_quirk(vdev->vbasedev.name);
2199 * And here's the same for BAR0 offset 0x1800...
2201 static void vfio_probe_nvidia_bar0_1800_quirk(VFIOPCIDevice *vdev, int nr)
2203 PCIDevice *pdev = &vdev->pdev;
2204 VFIOQuirk *quirk;
2206 if (!vdev->has_vga || nr != 0 ||
2207 pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
2208 return;
2211 /* Log the chipset ID */
2212 trace_vfio_probe_nvidia_bar0_1800_quirk_id(
2213 (unsigned int)(vfio_region_read(&vdev->bars[0].region, 0, 4) >> 20)
2214 & 0xff);
2216 quirk = g_malloc0(sizeof(*quirk));
2217 quirk->vdev = vdev;
2218 quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
2219 quirk->data.address_match = 0x1800;
2220 quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
2221 quirk->data.bar = nr;
2223 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
2224 "vfio-nvidia-bar0-1800-quirk",
2225 TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
2226 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
2227 quirk->data.address_match & TARGET_PAGE_MASK,
2228 &quirk->mem, 1);
2230 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
2232 trace_vfio_probe_nvidia_bar0_1800_quirk(vdev->vbasedev.name);
2236 * TODO - Some Nvidia devices provide config access to their companion HDA
2237 * device and even to their parent bridge via these config space mirrors.
2238 * Add quirks for those regions.
2242 * Common quirk probe entry points.
2244 static void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
2246 vfio_vga_probe_ati_3c3_quirk(vdev);
2247 vfio_vga_probe_nvidia_3d0_quirk(vdev);
2250 static void vfio_vga_quirk_teardown(VFIOPCIDevice *vdev)
2252 int i;
2254 for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
2255 while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
2256 VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
2257 memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
2258 object_unparent(OBJECT(&quirk->mem));
2259 QLIST_REMOVE(quirk, next);
2260 g_free(quirk);
2265 static void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
2267 vfio_probe_ati_bar4_window_quirk(vdev, nr);
2268 vfio_probe_ati_bar2_4000_quirk(vdev, nr);
2269 vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
2270 vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
2271 vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
2272 vfio_probe_rtl8168_bar2_window_quirk(vdev, nr);
2275 static void vfio_bar_quirk_teardown(VFIOPCIDevice *vdev, int nr)
2277 VFIOBAR *bar = &vdev->bars[nr];
2279 while (!QLIST_EMPTY(&bar->quirks)) {
2280 VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
2281 memory_region_del_subregion(&bar->region.mem, &quirk->mem);
2282 object_unparent(OBJECT(&quirk->mem));
2283 QLIST_REMOVE(quirk, next);
2284 g_free(quirk);
2289 * PCI config space
2291 static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
2293 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2294 uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
2296 memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
2297 emu_bits = le32_to_cpu(emu_bits);
2299 if (emu_bits) {
2300 emu_val = pci_default_read_config(pdev, addr, len);
2303 if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
2304 ssize_t ret;
2306 ret = pread(vdev->vbasedev.fd, &phys_val, len,
2307 vdev->config_offset + addr);
2308 if (ret != len) {
2309 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
2310 __func__, vdev->host.domain, vdev->host.bus,
2311 vdev->host.slot, vdev->host.function, addr, len);
2312 return -errno;
2314 phys_val = le32_to_cpu(phys_val);
2317 val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
2319 trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
2321 return val;
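/*
 * Worked example of the merge above, with made-up values: if only byte 1
 * of a 32-bit read is emulated (emu_bits == 0x0000ff00), then
 * emu_val == 0x00001200 and phys_val == 0x00003456 combine to
 *
 *   (0x1200 & 0xff00) | (0x3456 & ~0xff00) == 0x00001256
 *
 * i.e. the emulated byte wins and everything else comes from the device.
 */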
2324 static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
2325 uint32_t val, int len)
2327 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2328 uint32_t val_le = cpu_to_le32(val);
2330 trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
2332 /* Write everything to VFIO, let it filter out what we can't write */
2333 if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
2334 != len) {
2335 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
2336 __func__, vdev->host.domain, vdev->host.bus,
2337 vdev->host.slot, vdev->host.function, addr, val, len);
2340 /* MSI/MSI-X Enabling/Disabling */
2341 if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
2342 ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
2343 int is_enabled, was_enabled = msi_enabled(pdev);
2345 pci_default_write_config(pdev, addr, val, len);
2347 is_enabled = msi_enabled(pdev);
2349 if (!was_enabled) {
2350 if (is_enabled) {
2351 vfio_enable_msi(vdev);
2353 } else {
2354 if (!is_enabled) {
2355 vfio_disable_msi(vdev);
2356 } else {
2357 vfio_update_msi(vdev);
2360 } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
2361 ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
2362 int is_enabled, was_enabled = msix_enabled(pdev);
2364 pci_default_write_config(pdev, addr, val, len);
2366 is_enabled = msix_enabled(pdev);
2368 if (!was_enabled && is_enabled) {
2369 vfio_enable_msix(vdev);
2370 } else if (was_enabled && !is_enabled) {
2371 vfio_disable_msix(vdev);
2373 } else {
2374 /* Write everything to QEMU to keep emulated bits correct */
2375 pci_default_write_config(pdev, addr, val, len);
2380 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
2382 static int vfio_dma_unmap(VFIOContainer *container,
2383 hwaddr iova, ram_addr_t size)
2385 struct vfio_iommu_type1_dma_unmap unmap = {
2386 .argsz = sizeof(unmap),
2387 .flags = 0,
2388 .iova = iova,
2389 .size = size,
2392 if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
2393 error_report("VFIO_UNMAP_DMA: %d\n", -errno);
2394 return -errno;
2397 return 0;
2400 static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
2401 ram_addr_t size, void *vaddr, bool readonly)
2403 struct vfio_iommu_type1_dma_map map = {
2404 .argsz = sizeof(map),
2405 .flags = VFIO_DMA_MAP_FLAG_READ,
2406 .vaddr = (__u64)(uintptr_t)vaddr,
2407 .iova = iova,
2408 .size = size,
2411 if (!readonly) {
2412 map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
2416 * Try the mapping; if it fails with EBUSY, unmap the region and try
2417 * again. This shouldn't be necessary, but we sometimes see it in
2418 * the VGA ROM space.
2420 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
2421 (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
2422 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
2423 return 0;
2426 error_report("VFIO_MAP_DMA: %d\n", -errno);
2427 return -errno;
2430 static bool vfio_listener_skipped_section(MemoryRegionSection *section)
2432 return (!memory_region_is_ram(section->mr) &&
2433 !memory_region_is_iommu(section->mr)) ||
2435 * Sizing an enabled 64-bit BAR can cause spurious mappings to
2436 * addresses in the upper part of the 64-bit address space. These
2437 * are never accessed by the CPU and beyond the address width of
2438 * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
2440 section->offset_within_address_space & (1ULL << 63);
2443 static void vfio_iommu_map_notify(Notifier *n, void *data)
2445 VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
2446 VFIOContainer *container = giommu->container;
2447 IOMMUTLBEntry *iotlb = data;
2448 MemoryRegion *mr;
2449 hwaddr xlat;
2450 hwaddr len = iotlb->addr_mask + 1;
2451 void *vaddr;
2452 int ret;
2454 trace_vfio_iommu_map_notify(iotlb->iova,
2455 iotlb->iova + iotlb->addr_mask);
2458 * The IOMMU TLB entry we have just covers translation through
2459 * this IOMMU to its immediate target. We need to translate
2460 * it the rest of the way through to memory.
2462 mr = address_space_translate(&address_space_memory,
2463 iotlb->translated_addr,
2464 &xlat, &len, iotlb->perm & IOMMU_WO);
2465 if (!memory_region_is_ram(mr)) {
2466 error_report("iommu map to non memory area %"HWADDR_PRIx"\n",
2467 xlat);
2468 return;
2471 * Translation truncates length to the IOMMU page size,
2472 * check that it did not truncate too much.
2474 if (len & iotlb->addr_mask) {
2475 error_report("iommu has granularity incompatible with target AS\n");
2476 return;
2479 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
2480 vaddr = memory_region_get_ram_ptr(mr) + xlat;
2482 ret = vfio_dma_map(container, iotlb->iova,
2483 iotlb->addr_mask + 1, vaddr,
2484 !(iotlb->perm & IOMMU_WO) || mr->readonly);
2485 if (ret) {
2486 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
2487 "0x%"HWADDR_PRIx", %p) = %d (%m)",
2488 container, iotlb->iova,
2489 iotlb->addr_mask + 1, vaddr, ret);
2491 } else {
2492 ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
2493 if (ret) {
2494 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
2495 "0x%"HWADDR_PRIx") = %d (%m)",
2496 container, iotlb->iova,
2497 iotlb->addr_mask + 1, ret);
2502 static void vfio_listener_region_add(MemoryListener *listener,
2503 MemoryRegionSection *section)
2505 VFIOContainer *container = container_of(listener, VFIOContainer,
2506 iommu_data.type1.listener);
2507 hwaddr iova, end;
2508 Int128 llend;
2509 void *vaddr;
2510 int ret;
2512 if (vfio_listener_skipped_section(section)) {
2513 trace_vfio_listener_region_add_skip(
2514 section->offset_within_address_space,
2515 section->offset_within_address_space +
2516 int128_get64(int128_sub(section->size, int128_one())));
2517 return;
2520 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
2521 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
2522 error_report("%s received unaligned region", __func__);
2523 return;
2526 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
2527 llend = int128_make64(section->offset_within_address_space);
2528 llend = int128_add(llend, section->size);
2529 llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
2531 if (int128_ge(int128_make64(iova), llend)) {
2532 return;
2535 memory_region_ref(section->mr);
2537 if (memory_region_is_iommu(section->mr)) {
2538 VFIOGuestIOMMU *giommu;
2540 trace_vfio_listener_region_add_iommu(iova,
2541 int128_get64(int128_sub(llend, int128_one())));
2543 * FIXME: We should do some checking to see if the
2544 * capabilities of the host VFIO IOMMU are adequate to model
2545 * the guest IOMMU
2547 * FIXME: For VFIO iommu types which have KVM acceleration to
2548 * avoid bouncing all map/unmaps through qemu this way, this
2549 * would be the right place to wire that up (tell the KVM
2550 * device emulation the VFIO iommu handles to use).
2553 * This assumes that the guest IOMMU is empty of
2554 * mappings at this point.
2556 * One way of doing this is:
2557 * 1. Avoid sharing IOMMUs between emulated devices or different
2558 * IOMMU groups.
2559 * 2. Implement VFIO_IOMMU_ENABLE in the host kernel to fail if
2560 * there are some mappings in IOMMU.
2562 * VFIO on SPAPR does that. Other IOMMU models may do this differently;
2563 * they must either make sure there are no existing mappings or
2564 * loop through existing mappings to map them into VFIO.
2566 giommu = g_malloc0(sizeof(*giommu));
2567 giommu->iommu = section->mr;
2568 giommu->container = container;
2569 giommu->n.notify = vfio_iommu_map_notify;
2570 QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
2571 memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
2573 return;
2576 /* Here we assume that memory_region_is_ram(section->mr)==true */
2578 end = int128_get64(llend);
2579 vaddr = memory_region_get_ram_ptr(section->mr) +
2580 section->offset_within_region +
2581 (iova - section->offset_within_address_space);
2583 trace_vfio_listener_region_add_ram(iova, end - 1, vaddr);
2585 ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
2586 if (ret) {
2587 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
2588 "0x%"HWADDR_PRIx", %p) = %d (%m)",
2589 container, iova, end - iova, vaddr, ret);
2592 * On the initfn path, store the first error in the container so we
2593 * can gracefully fail. At runtime, there's not much we can do other
2594 * than throw a hardware error.
2596 if (!container->iommu_data.type1.initialized) {
2597 if (!container->iommu_data.type1.error) {
2598 container->iommu_data.type1.error = ret;
2600 } else {
2601 hw_error("vfio: DMA mapping failed, unable to continue");
2606 static void vfio_listener_region_del(MemoryListener *listener,
2607 MemoryRegionSection *section)
2609 VFIOContainer *container = container_of(listener, VFIOContainer,
2610 iommu_data.type1.listener);
2611 hwaddr iova, end;
2612 int ret;
2614 if (vfio_listener_skipped_section(section)) {
2615 trace_vfio_listener_region_del_skip(
2616 section->offset_within_address_space,
2617 section->offset_within_address_space +
2618 int128_get64(int128_sub(section->size, int128_one())));
2619 return;
2622 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
2623 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
2624 error_report("%s received unaligned region", __func__);
2625 return;
2628 if (memory_region_is_iommu(section->mr)) {
2629 VFIOGuestIOMMU *giommu;
2631 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
2632 if (giommu->iommu == section->mr) {
2633 memory_region_unregister_iommu_notifier(&giommu->n);
2634 QLIST_REMOVE(giommu, giommu_next);
2635 g_free(giommu);
2636 break;
2641 * FIXME: We assume the one big unmap below is adequate to
2642 * remove any individual page mappings in the IOMMU which
2643 * might have been copied into VFIO. This works for a page table
2644 * based IOMMU where a big unmap flattens a large range of IO-PTEs.
2645 * That may not be true for all IOMMU types.
2649 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
2650 end = (section->offset_within_address_space + int128_get64(section->size)) &
2651 TARGET_PAGE_MASK;
2653 if (iova >= end) {
2654 return;
2657 trace_vfio_listener_region_del(iova, end - 1);
2659 ret = vfio_dma_unmap(container, iova, end - iova);
2660 memory_region_unref(section->mr);
2661 if (ret) {
2662 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
2663 "0x%"HWADDR_PRIx") = %d (%m)",
2664 container, iova, end - iova, ret);
2668 static MemoryListener vfio_memory_listener = {
2669 .region_add = vfio_listener_region_add,
2670 .region_del = vfio_listener_region_del,
2673 static void vfio_listener_release(VFIOContainer *container)
2675 memory_listener_unregister(&container->iommu_data.type1.listener);
2679 * Interrupt setup
2681 static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
2683 switch (vdev->interrupt) {
2684 case VFIO_INT_INTx:
2685 vfio_disable_intx(vdev);
2686 break;
2687 case VFIO_INT_MSI:
2688 vfio_disable_msi(vdev);
2689 break;
2690 case VFIO_INT_MSIX:
2691 vfio_disable_msix(vdev);
2692 break;
2696 static int vfio_setup_msi(VFIOPCIDevice *vdev, int pos)
2698 uint16_t ctrl;
2699 bool msi_64bit, msi_maskbit;
2700 int ret, entries;
2702 if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
2703 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
2704 return -errno;
2706 ctrl = le16_to_cpu(ctrl);
2708 msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
2709 msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
2710 entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
2712 trace_vfio_setup_msi(vdev->vbasedev.name, pos);
2714 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
2715 if (ret < 0) {
2716 if (ret == -ENOTSUP) {
2717 return 0;
2719 error_report("vfio: msi_init failed");
2720 return ret;
2722 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
2724 return 0;
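/*
 * The formula above yields the four lengths the PCI spec defines for the
 * MSI capability: 0x0a (32-bit), 0x0e (64-bit), 0x14 (32-bit with
 * per-vector masking) and 0x18 (64-bit with per-vector masking).
 */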
2728 * We don't have any control over how pci_add_capability() inserts
2729 * capabilities into the chain. In order to setup MSI-X we need a
2730 * MemoryRegion for the BAR. In order to setup the BAR and not
2731 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
2732 * need to first look for where the MSI-X table lives. So we
2733 * unfortunately split MSI-X setup across two functions.
2735 static int vfio_early_setup_msix(VFIOPCIDevice *vdev)
2737 uint8_t pos;
2738 uint16_t ctrl;
2739 uint32_t table, pba;
2740 int fd = vdev->vbasedev.fd;
2742 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
2743 if (!pos) {
2744 return 0;
2747 if (pread(fd, &ctrl, sizeof(ctrl),
2748 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
2749 return -errno;
2752 if (pread(fd, &table, sizeof(table),
2753 vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
2754 return -errno;
2757 if (pread(fd, &pba, sizeof(pba),
2758 vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
2759 return -errno;
2762 ctrl = le16_to_cpu(ctrl);
2763 table = le32_to_cpu(table);
2764 pba = le32_to_cpu(pba);
2766 vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
2767 vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
2768 vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
2769 vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
2770 vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
2771 vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
2773 trace_vfio_early_setup_msix(vdev->vbasedev.name, pos,
2774 vdev->msix->table_bar,
2775 vdev->msix->table_offset,
2776 vdev->msix->entries);
2778 return 0;
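/*
 * Worked example of the decode above, with made-up register values:
 * TABLE == 0x00002003 puts the vector table in BAR3 (BIR == 3) at offset
 * 0x2000, and a CTRL QSIZE field of 15 means 16 entries, since the field
 * is encoded as N-1.
 */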
2781 static int vfio_setup_msix(VFIOPCIDevice *vdev, int pos)
2783 int ret;
2785 ret = msix_init(&vdev->pdev, vdev->msix->entries,
2786 &vdev->bars[vdev->msix->table_bar].region.mem,
2787 vdev->msix->table_bar, vdev->msix->table_offset,
2788 &vdev->bars[vdev->msix->pba_bar].region.mem,
2789 vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
2790 if (ret < 0) {
2791 if (ret == -ENOTSUP) {
2792 return 0;
2794 error_report("vfio: msix_init failed");
2795 return ret;
2798 return 0;
2801 static void vfio_teardown_msi(VFIOPCIDevice *vdev)
2803 msi_uninit(&vdev->pdev);
2805 if (vdev->msix) {
2806 msix_uninit(&vdev->pdev,
2807 &vdev->bars[vdev->msix->table_bar].region.mem,
2808 &vdev->bars[vdev->msix->pba_bar].region.mem);
2813 * Resource setup
2815 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
2817 int i;
2819 for (i = 0; i < PCI_ROM_SLOT; i++) {
2820 VFIOBAR *bar = &vdev->bars[i];
2822 if (!bar->region.size) {
2823 continue;
2826 memory_region_set_enabled(&bar->region.mmap_mem, enabled);
2827 if (vdev->msix && vdev->msix->table_bar == i) {
2828 memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
2833 static void vfio_unmap_bar(VFIOPCIDevice *vdev, int nr)
2835 VFIOBAR *bar = &vdev->bars[nr];
2837 if (!bar->region.size) {
2838 return;
2841 vfio_bar_quirk_teardown(vdev, nr);
2843 memory_region_del_subregion(&bar->region.mem, &bar->region.mmap_mem);
2844 munmap(bar->region.mmap, memory_region_size(&bar->region.mmap_mem));
2846 if (vdev->msix && vdev->msix->table_bar == nr) {
2847 memory_region_del_subregion(&bar->region.mem, &vdev->msix->mmap_mem);
2848 munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
2852 static int vfio_mmap_region(Object *obj, VFIORegion *region,
2853 MemoryRegion *mem, MemoryRegion *submem,
2854 void **map, size_t size, off_t offset,
2855 const char *name)
2857 int ret = 0;
2858 VFIODevice *vbasedev = region->vbasedev;
2860 if (VFIO_ALLOW_MMAP && size && region->flags &
2861 VFIO_REGION_INFO_FLAG_MMAP) {
2862 int prot = 0;
2864 if (region->flags & VFIO_REGION_INFO_FLAG_READ) {
2865 prot |= PROT_READ;
2868 if (region->flags & VFIO_REGION_INFO_FLAG_WRITE) {
2869 prot |= PROT_WRITE;
2872 *map = mmap(NULL, size, prot, MAP_SHARED,
2873 vbasedev->fd, region->fd_offset + offset);
2874 if (*map == MAP_FAILED) {
2875 *map = NULL;
2876 ret = -errno;
2877 goto empty_region;
2880 memory_region_init_ram_ptr(submem, obj, name, size, *map);
2881 memory_region_set_skip_dump(submem);
2882 } else {
2883 empty_region:
2884 /* Create a zero sized sub-region to make cleanup easy. */
2885 memory_region_init(submem, obj, name, 0);
2888 memory_region_add_subregion(mem, offset, submem);
2890 return ret;
2893 static void vfio_map_bar(VFIOPCIDevice *vdev, int nr)
2895 VFIOBAR *bar = &vdev->bars[nr];
2896 unsigned size = bar->region.size;
2897 char name[64];
2898 uint32_t pci_bar;
2899 uint8_t type;
2900 int ret;
2902 /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
2903 if (!size) {
2904 return;
2907 snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
2908 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2909 vdev->host.function, nr);
2911 /* Determine what type of BAR this is for registration */
2912 ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
2913 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
2914 if (ret != sizeof(pci_bar)) {
2915 error_report("vfio: Failed to read BAR %d (%m)", nr);
2916 return;
2919 pci_bar = le32_to_cpu(pci_bar);
2920 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
2921 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
2922 type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
2923 ~PCI_BASE_ADDRESS_MEM_MASK);
2925 /* A "slow" read/write mapping underlies all BARs */
2926 memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops,
2927 bar, name, size);
2928 pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem);
2931 * We can't mmap areas overlapping the MSIX vector table, so we
2932 * potentially insert a direct-mapped subregion before and after it.
2934 if (vdev->msix && vdev->msix->table_bar == nr) {
2935 size = vdev->msix->table_offset & qemu_host_page_mask;
2938 strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
2939 if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
2940 &bar->region.mmap_mem, &bar->region.mmap,
2941 size, 0, name)) {
2942 error_report("%s unsupported. Performance may be slow", name);
2945 if (vdev->msix && vdev->msix->table_bar == nr) {
2946 unsigned start;
2948 start = HOST_PAGE_ALIGN(vdev->msix->table_offset +
2949 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
2951 size = start < bar->region.size ? bar->region.size - start : 0;
2952 strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
2953 /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
2954 if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
2955 &vdev->msix->mmap_mem,
2956 &vdev->msix->mmap, size, start, name)) {
2957 error_report("%s unsupported. Performance may be slow", name);
2961 vfio_bar_quirk_setup(vdev, nr);
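/*
 * Worked example of the split above, with made-up numbers: for a 64KiB
 * BAR with a 16-entry MSI-X table (16 bytes per entry) at offset 0x8000
 * and 4KiB host pages, the first mmap covers [0x0, 0x8000) and the
 * second starts at HOST_PAGE_ALIGN(0x8000 + 0x100) == 0x9000, covering
 * [0x9000, 0x10000); the table itself stays on the slow read/write path.
 */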
2964 static void vfio_map_bars(VFIOPCIDevice *vdev)
2966 int i;
2968 for (i = 0; i < PCI_ROM_SLOT; i++) {
2969 vfio_map_bar(vdev, i);
2972 if (vdev->has_vga) {
2973 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
2974 OBJECT(vdev), &vfio_vga_ops,
2975 &vdev->vga.region[QEMU_PCI_VGA_MEM],
2976 "vfio-vga-mmio@0xa0000",
2977 QEMU_PCI_VGA_MEM_SIZE);
2978 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
2979 OBJECT(vdev), &vfio_vga_ops,
2980 &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
2981 "vfio-vga-io@0x3b0",
2982 QEMU_PCI_VGA_IO_LO_SIZE);
2983 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
2984 OBJECT(vdev), &vfio_vga_ops,
2985 &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
2986 "vfio-vga-io@0x3c0",
2987 QEMU_PCI_VGA_IO_HI_SIZE);
2989 pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
2990 &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
2991 &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
2992 vfio_vga_quirk_setup(vdev);
2996 static void vfio_unmap_bars(VFIOPCIDevice *vdev)
2998 int i;
3000 for (i = 0; i < PCI_ROM_SLOT; i++) {
3001 vfio_unmap_bar(vdev, i);
3004 if (vdev->has_vga) {
3005 vfio_vga_quirk_teardown(vdev);
3006 pci_unregister_vga(&vdev->pdev);
3011 * General setup
3013 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
3015 uint8_t tmp, next = 0xff;
3017 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
3018 tmp = pdev->config[tmp + 1]) {
3019 if (tmp > pos && tmp < next) {
3020 next = tmp;
3024 return next - pos;
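/*
 * For example, with capabilities at 0x40, 0x60 and 0x50, the capability
 * at 0x40 has size 0x50 - 0x40 == 0x10: the scan picks the smallest list
 * position above pos, regardless of the order the chain links them. For
 * the last capability, next stays 0xff and the size runs to the end of
 * standard config space.
 */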
3027 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
3029 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
3032 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
3033 uint16_t val, uint16_t mask)
3035 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
3036 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
3037 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
3040 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
3042 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
3045 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
3046 uint32_t val, uint32_t mask)
3048 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
3049 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
3050 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
3053 static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
3055 uint16_t flags;
3056 uint8_t type;
3058 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
3059 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
3061 if (type != PCI_EXP_TYPE_ENDPOINT &&
3062 type != PCI_EXP_TYPE_LEG_END &&
3063 type != PCI_EXP_TYPE_RC_END) {
3065 error_report("vfio: Assignment of PCIe type 0x%x "
3066 "devices is not currently supported", type);
3067 return -EINVAL;
3070 if (!pci_bus_is_express(vdev->pdev.bus)) {
3072 * Use the express capability as-is on a PCI bus. It doesn't make much
3073 * sense to even expose it, but some drivers (ex. tg3) depend on it
3074 * and guests don't seem to be particular about it. We'll need
3075 * to revisit this or force express devices onto express buses if we
3076 * ever expose an IOMMU to the guest.
3078 } else if (pci_bus_is_root(vdev->pdev.bus)) {
3080 * On a Root Complex bus Endpoints become Root Complex Integrated
3081 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
3083 if (type == PCI_EXP_TYPE_ENDPOINT) {
3084 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
3085 PCI_EXP_TYPE_RC_END << 4,
3086 PCI_EXP_FLAGS_TYPE);
3088 /* Link Capabilities, Status, and Control go away */
3089 if (size > PCI_EXP_LNKCTL) {
3090 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
3091 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
3092 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
3094 #ifndef PCI_EXP_LNKCAP2
3095 #define PCI_EXP_LNKCAP2 44
3096 #endif
3097 #ifndef PCI_EXP_LNKSTA2
3098 #define PCI_EXP_LNKSTA2 50
3099 #endif
3100 /* Link 2 Capabilities, Status, and Control go away */
3101 if (size > PCI_EXP_LNKCAP2) {
3102 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
3103 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
3104 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
3108 } else if (type == PCI_EXP_TYPE_LEG_END) {
3110 * Legacy endpoints don't belong on the root complex. Windows
3111 * seems to be happier with devices if we skip the capability.
3113 return 0;
3116 } else {
3118 * Convert Root Complex Integrated Endpoints to regular endpoints.
3119 * These devices don't support LNK/LNK2 capabilities, so make them up.
3121 if (type == PCI_EXP_TYPE_RC_END) {
3122 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
3123 PCI_EXP_TYPE_ENDPOINT << 4,
3124 PCI_EXP_FLAGS_TYPE);
3125 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
3126 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
3127 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
3130 /* Mark the Link Status bits as emulated to allow virtual negotiation */
3131 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
3132 pci_get_word(vdev->pdev.config + pos +
3133 PCI_EXP_LNKSTA),
3134 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
3137 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
3138 if (pos >= 0) {
3139 vdev->pdev.exp.exp_cap = pos;
3142 return pos;
3145 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
3147 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
3149 if (cap & PCI_EXP_DEVCAP_FLR) {
3150 trace_vfio_check_pcie_flr(vdev->vbasedev.name);
3151 vdev->has_flr = true;
3155 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
3157 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
3159 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
3160 trace_vfio_check_pm_reset(vdev->vbasedev.name);
3161 vdev->has_pm_reset = true;
3165 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
3167 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
3169 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
3170 trace_vfio_check_af_flr(vdev->vbasedev.name);
3171 vdev->has_flr = true;
3175 static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
3177 PCIDevice *pdev = &vdev->pdev;
3178 uint8_t cap_id, next, size;
3179 int ret;
3181 cap_id = pdev->config[pos];
3182 next = pdev->config[pos + 1];
3185 * If it becomes important to configure capabilities to their actual
3186 * size, use this as the default when it's something we don't recognize.
3187 * Since QEMU doesn't actually handle many of the config accesses,
3188 * exact size doesn't seem worthwhile.
3190 size = vfio_std_cap_max_size(pdev, pos);
3193 * pci_add_capability always inserts the new capability at the head
3194 * of the chain. Therefore to end up with a chain that matches the
3195 * physical device, we insert from the end by making this recursive.
3196 * This is also why we pre-calculate the size above, as the cached
3197 * config space will be changed as we unwind the stack.
3199 if (next) {
3200 ret = vfio_add_std_cap(vdev, next);
3201 if (ret) {
3202 return ret;
3204 } else {
3205 /* Begin the rebuild, use QEMU emulated list bits */
3206 pdev->config[PCI_CAPABILITY_LIST] = 0;
3207 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
3208 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
3211 /* Use emulated next pointer to allow dropping caps */
3212 pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);
3214 switch (cap_id) {
3215 case PCI_CAP_ID_MSI:
3216 ret = vfio_setup_msi(vdev, pos);
3217 break;
3218 case PCI_CAP_ID_EXP:
3219 vfio_check_pcie_flr(vdev, pos);
3220 ret = vfio_setup_pcie_cap(vdev, pos, size);
3221 break;
3222 case PCI_CAP_ID_MSIX:
3223 ret = vfio_setup_msix(vdev, pos);
3224 break;
3225 case PCI_CAP_ID_PM:
3226 vfio_check_pm_reset(vdev, pos);
3227 vdev->pm_cap = pos;
3228 ret = pci_add_capability(pdev, cap_id, pos, size);
3229 break;
3230 case PCI_CAP_ID_AF:
3231 vfio_check_af_flr(vdev, pos);
3232 ret = pci_add_capability(pdev, cap_id, pos, size);
3233 break;
3234 default:
3235 ret = pci_add_capability(pdev, cap_id, pos, size);
3236 break;
3239 if (ret < 0) {
3240 error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
3241 "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
3242 vdev->host.bus, vdev->host.slot, vdev->host.function,
3243 cap_id, size, pos, ret);
3244 return ret;
3247 return 0;
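/*
 * For example, a physical chain 0x40 -> 0x50 -> 0x60 recurses down to
 * 0x60 first, so pci_add_capability() head-inserts 0x60, then 0x50, then
 * 0x40, and the rebuilt chain ends up in the device's original order.
 */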
3250 static int vfio_add_capabilities(VFIOPCIDevice *vdev)
3252 PCIDevice *pdev = &vdev->pdev;
3254 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
3255 !pdev->config[PCI_CAPABILITY_LIST]) {
3256 return 0; /* Nothing to add */
3259 return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
3262 static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
3264 PCIDevice *pdev = &vdev->pdev;
3265 uint16_t cmd;
3267 vfio_disable_interrupts(vdev);
3269 /* Make sure the device is in D0 */
3270 if (vdev->pm_cap) {
3271 uint16_t pmcsr;
3272 uint8_t state;
3274 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
3275 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
3276 if (state) {
3277 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3278 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
3279 /* vfio handles the necessary delay here */
3280 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
3281 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
3282 if (state) {
3283 error_report("vfio: Unable to power on device, stuck in D%d",
3284 state);
3290 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
3291 * Also put INTx Disable in known state.
3293 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
3294 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
3295 PCI_COMMAND_INTX_DISABLE);
3296 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
3299 static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
3301 vfio_enable_intx(vdev);
3304 static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
3305 PCIHostDeviceAddress *host2)
3307 return (host1->domain == host2->domain && host1->bus == host2->bus &&
3308 host1->slot == host2->slot && host1->function == host2->function);
3311 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
3313 VFIOGroup *group;
3314 struct vfio_pci_hot_reset_info *info;
3315 struct vfio_pci_dependent_device *devices;
3316 struct vfio_pci_hot_reset *reset;
3317 int32_t *fds;
3318 int ret, i, count;
3319 bool multi = false;
3321 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
3323 vfio_pci_pre_reset(vdev);
3324 vdev->vbasedev.needs_reset = false;
3326 info = g_malloc0(sizeof(*info));
3327 info->argsz = sizeof(*info);
3329 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
3330 if (ret && errno != ENOSPC) {
3331 ret = -errno;
3332 if (!vdev->has_pm_reset) {
3333 error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
3334 "no available reset mechanism.", vdev->host.domain,
3335 vdev->host.bus, vdev->host.slot, vdev->host.function);
3337 goto out_single;
3340 count = info->count;
3341 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
3342 info->argsz = sizeof(*info) + (count * sizeof(*devices));
3343 devices = &info->devices[0];
3345 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
3346 if (ret) {
3347 ret = -errno;
3348 error_report("vfio: hot reset info failed: %m");
3349 goto out_single;
3352 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
3354 /* Verify that we have all the groups required */
3355 for (i = 0; i < info->count; i++) {
3356 PCIHostDeviceAddress host;
3357 VFIOPCIDevice *tmp;
3358 VFIODevice *vbasedev_iter;
3360 host.domain = devices[i].segment;
3361 host.bus = devices[i].bus;
3362 host.slot = PCI_SLOT(devices[i].devfn);
3363 host.function = PCI_FUNC(devices[i].devfn);
3365 trace_vfio_pci_hot_reset_dep_devices(host.domain,
3366 host.bus, host.slot, host.function, devices[i].group_id);
3368 if (vfio_pci_host_match(&host, &vdev->host)) {
3369 continue;
3372 QLIST_FOREACH(group, &vfio_group_list, next) {
3373 if (group->groupid == devices[i].group_id) {
3374 break;
3378 if (!group) {
3379 if (!vdev->has_pm_reset) {
3380 error_report("vfio: Cannot reset device %s, "
3381 "depends on group %d which is not owned.",
3382 vdev->vbasedev.name, devices[i].group_id);
3384 ret = -EPERM;
3385 goto out;
3388 /* Prep dependent devices for reset and clear our marker. */
3389 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
3390 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
3391 continue;
3393 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
3394 if (vfio_pci_host_match(&host, &tmp->host)) {
3395 if (single) {
3396 error_report("vfio: found another in-use device "
3397 "%s\n", vbasedev_iter->name);
3398 ret = -EINVAL;
3399 goto out_single;
3401 vfio_pci_pre_reset(tmp);
3402 tmp->vbasedev.needs_reset = false;
3403 multi = true;
3404 break;
3409 if (!single && !multi) {
3410 error_report("vfio: No other in-use devices for multi hot reset\n");
3411 ret = -EINVAL;
3412 goto out_single;
3415 /* Determine how many group fds need to be passed */
3416 count = 0;
3417 QLIST_FOREACH(group, &vfio_group_list, next) {
3418 for (i = 0; i < info->count; i++) {
3419 if (group->groupid == devices[i].group_id) {
3420 count++;
3421 break;
3426 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
3427 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
3428 fds = &reset->group_fds[0];
3430 /* Fill in group fds */
3431 QLIST_FOREACH(group, &vfio_group_list, next) {
3432 for (i = 0; i < info->count; i++) {
3433 if (group->groupid == devices[i].group_id) {
3434 fds[reset->count++] = group->fd;
3435 break;
3440 /* Bus reset! */
3441 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
3442 g_free(reset);
3444 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
3445 ret ? "%m" : "Success");
3447 out:
3448 /* Re-enable INTx on affected devices */
3449 for (i = 0; i < info->count; i++) {
3450 PCIHostDeviceAddress host;
3451 VFIOPCIDevice *tmp;
3452 VFIODevice *vbasedev_iter;
3454 host.domain = devices[i].segment;
3455 host.bus = devices[i].bus;
3456 host.slot = PCI_SLOT(devices[i].devfn);
3457 host.function = PCI_FUNC(devices[i].devfn);
3459 if (vfio_pci_host_match(&host, &vdev->host)) {
3460 continue;
3463 QLIST_FOREACH(group, &vfio_group_list, next) {
3464 if (group->groupid == devices[i].group_id) {
3465 break;
3469 if (!group) {
3470 break;
3473 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
3474 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
3475 continue;
3477 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
3478 if (vfio_pci_host_match(&host, &tmp->host)) {
3479 vfio_pci_post_reset(tmp);
3480 break;
3484 out_single:
3485 vfio_pci_post_reset(vdev);
3486 g_free(info);
3488 return ret;
3492 * We want to differentiate hot reset of multiple in-use devices vs hot reset
3493 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
3494 * of doing hot resets when there is only a single device per bus. The in-use
3495 * here refers to how many VFIODevices are affected. A hot reset that affects
3496 * multiple devices, but only a single in-use device, means that we can call
3497 * it from our bus ->reset() callback since the extent is effectively a single
3498 * device. This allows us to make use of it in the hotplug path. When there
3499 * are multiple in-use devices, we can only trigger the hot reset during a
3500 * system reset and thus from our reset handler. We separate _one vs _multi
3501 * here so that we don't overlap and do a double reset on the system reset
3502 * path where both our reset handler and ->reset() callback are used. Calling
3503 * _one() will only do a hot reset for the single in-use device case; calling
3504 * _multi() will do nothing if a _one() would have been sufficient.
3506 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
3508 return vfio_pci_hot_reset(vdev, true);
3511 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
3513 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
3514 return vfio_pci_hot_reset(vdev, false);
3517 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
3519 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
3520 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
3521 vbasedev->needs_reset = true;
3525 static VFIODeviceOps vfio_pci_ops = {
3526 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
3527 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
3528 .vfio_eoi = vfio_eoi,
3529 .vfio_populate_device = vfio_populate_device,
3532 static void vfio_reset_handler(void *opaque)
3534 VFIOGroup *group;
3535 VFIODevice *vbasedev;
3537 QLIST_FOREACH(group, &vfio_group_list, next) {
3538 QLIST_FOREACH(vbasedev, &group->device_list, next) {
3539 vbasedev->ops->vfio_compute_needs_reset(vbasedev);
3543 QLIST_FOREACH(group, &vfio_group_list, next) {
3544 QLIST_FOREACH(vbasedev, &group->device_list, next) {
3545 if (vbasedev->needs_reset) {
3546 vbasedev->ops->vfio_hot_reset_multi(vbasedev);
3552 static void vfio_kvm_device_add_group(VFIOGroup *group)
3554 #ifdef CONFIG_KVM
3555 struct kvm_device_attr attr = {
3556 .group = KVM_DEV_VFIO_GROUP,
3557 .attr = KVM_DEV_VFIO_GROUP_ADD,
3558 .addr = (uint64_t)(unsigned long)&group->fd,
3561 if (!kvm_enabled()) {
3562 return;
3565 if (vfio_kvm_device_fd < 0) {
3566 struct kvm_create_device cd = {
3567 .type = KVM_DEV_TYPE_VFIO,
3570 if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
3571 error_report("KVM_CREATE_DEVICE: %m\n");
3572 return;
3575 vfio_kvm_device_fd = cd.fd;
3578 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
3579 error_report("Failed to add group %d to KVM VFIO device: %m",
3580 group->groupid);
3582 #endif
3585 static void vfio_kvm_device_del_group(VFIOGroup *group)
3587 #ifdef CONFIG_KVM
3588 struct kvm_device_attr attr = {
3589 .group = KVM_DEV_VFIO_GROUP,
3590 .attr = KVM_DEV_VFIO_GROUP_DEL,
3591 .addr = (uint64_t)(unsigned long)&group->fd,
3594 if (vfio_kvm_device_fd < 0) {
3595 return;
3598 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
3599 error_report("Failed to remove group %d from KVM VFIO device: %m",
3600 group->groupid);
3602 #endif
3605 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
3607 VFIOAddressSpace *space;
3609 QLIST_FOREACH(space, &vfio_address_spaces, list) {
3610 if (space->as == as) {
3611 return space;
3615 /* No suitable VFIOAddressSpace, create a new one */
3616 space = g_malloc0(sizeof(*space));
3617 space->as = as;
3618 QLIST_INIT(&space->containers);
3620 QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
3622 return space;
3625 static void vfio_put_address_space(VFIOAddressSpace *space)
3627 if (QLIST_EMPTY(&space->containers)) {
3628 QLIST_REMOVE(space, list);
3629 g_free(space);
3633 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
3635 VFIOContainer *container;
3636 int ret, fd;
3637 VFIOAddressSpace *space;
3639 space = vfio_get_address_space(as);
3641 QLIST_FOREACH(container, &space->containers, next) {
3642 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
3643 group->container = container;
3644 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
3645 return 0;
3649 fd = qemu_open("/dev/vfio/vfio", O_RDWR);
3650 if (fd < 0) {
3651 error_report("vfio: failed to open /dev/vfio/vfio: %m");
3652 ret = -errno;
3653 goto put_space_exit;
3656 ret = ioctl(fd, VFIO_GET_API_VERSION);
3657 if (ret != VFIO_API_VERSION) {
3658 error_report("vfio: supported vfio version: %d, "
3659 "reported version: %d", VFIO_API_VERSION, ret);
3660 ret = -EINVAL;
3661 goto close_fd_exit;
3664 container = g_malloc0(sizeof(*container));
3665 container->space = space;
3666 container->fd = fd;
3668 if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
3669 ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
3670 if (ret) {
3671 error_report("vfio: failed to set group container: %m");
3672 ret = -errno;
3673 goto free_container_exit;
3676 ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
3677 if (ret) {
3678 error_report("vfio: failed to set iommu for container: %m");
3679 ret = -errno;
3680 goto free_container_exit;
3683 container->iommu_data.type1.listener = vfio_memory_listener;
3684 container->iommu_data.release = vfio_listener_release;
3686 memory_listener_register(&container->iommu_data.type1.listener,
3687 container->space->as);
3689 if (container->iommu_data.type1.error) {
3690 ret = container->iommu_data.type1.error;
3691 error_report("vfio: memory listener initialization failed for container");
3692 goto listener_release_exit;
3695 container->iommu_data.type1.initialized = true;
3697 } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
3698 ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
3699 if (ret) {
3700 error_report("vfio: failed to set group container: %m");
3701 ret = -errno;
3702 goto free_container_exit;
3705 ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
3706 if (ret) {
3707 error_report("vfio: failed to set iommu for container: %m");
3708 ret = -errno;
3709 goto free_container_exit;
3713 * The host kernel code implementing VFIO_IOMMU_DISABLE is called
3714 * when container fd is closed so we do not call it explicitly
3715 * in this file.
3717 ret = ioctl(fd, VFIO_IOMMU_ENABLE);
3718 if (ret) {
3719 error_report("vfio: failed to enable container: %m");
3720 ret = -errno;
3721 goto free_container_exit;
3724 container->iommu_data.type1.listener = vfio_memory_listener;
3725 container->iommu_data.release = vfio_listener_release;
3727 memory_listener_register(&container->iommu_data.type1.listener,
3728 container->space->as);
3730 } else {
3731 error_report("vfio: No available IOMMU models");
3732 ret = -EINVAL;
3733 goto free_container_exit;
3736 QLIST_INIT(&container->group_list);
3737 QLIST_INSERT_HEAD(&space->containers, container, next);
3739 group->container = container;
3740 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
3742 return 0;
3744 listener_release_exit:
3745 vfio_listener_release(container);
3747 free_container_exit:
3748 g_free(container);
3750 close_fd_exit:
3751 close(fd);
3753 put_space_exit:
3754 vfio_put_address_space(space);
3756 return ret;
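/*
 * Illustrative only: a minimal standalone sketch of the type1 sequence
 * above, after the style of the kernel's Documentation/vfio.txt. The
 * group number 26 and the function name are made up, and error handling
 * is elided.
 */
#if 0
#include <fcntl.h>
#include <linux/vfio.h>
#include <sys/ioctl.h>

static int vfio_container_sketch(void)
{
    int container = open("/dev/vfio/vfio", O_RDWR);
    int group = open("/dev/vfio/26", O_RDWR);    /* hypothetical group */

    if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION ||
        !ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        return -1;
    }
    ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);  /* group joins */
    ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);  /* pick model */
    /* the memory listener then replays RAM via VFIO_IOMMU_MAP_DMA */
    return container;
}
#endif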
3759 static void vfio_disconnect_container(VFIOGroup *group)
3761 VFIOContainer *container = group->container;
3763 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
3764 error_report("vfio: error disconnecting group %d from container",
3765 group->groupid);
3768 QLIST_REMOVE(group, container_next);
3769 group->container = NULL;
3771 if (QLIST_EMPTY(&container->group_list)) {
3772 VFIOAddressSpace *space = container->space;
3774 if (container->iommu_data.release) {
3775 container->iommu_data.release(container);
3777 QLIST_REMOVE(container, next);
3778 trace_vfio_disconnect_container(container->fd);
3779 close(container->fd);
3780 g_free(container);
3782 vfio_put_address_space(space);
3786 static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
3788 VFIOGroup *group;
3789 char path[32];
3790 struct vfio_group_status status = { .argsz = sizeof(status) };
3792 QLIST_FOREACH(group, &vfio_group_list, next) {
3793 if (group->groupid == groupid) {
3794 /* Found it. Now is it already in the right context? */
3795 if (group->container->space->as == as) {
3796 return group;
3797 } else {
3798 error_report("vfio: group %d used in multiple address spaces",
3799 group->groupid);
3800 return NULL;
3805 group = g_malloc0(sizeof(*group));
3807 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
3808 group->fd = qemu_open(path, O_RDWR);
3809 if (group->fd < 0) {
3810 error_report("vfio: error opening %s: %m", path);
3811 goto free_group_exit;
3814 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
3815 error_report("vfio: error getting group status: %m");
3816 goto close_fd_exit;
3819 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
3820 error_report("vfio: error, group %d is not viable, please ensure "
3821 "all devices within the iommu_group are bound to their "
3822 "vfio bus driver.", groupid);
3823 goto close_fd_exit;
3826 group->groupid = groupid;
3827 QLIST_INIT(&group->device_list);
3829 if (vfio_connect_container(group, as)) {
3830 error_report("vfio: failed to setup container for group %d", groupid);
3831 goto close_fd_exit;
3834 if (QLIST_EMPTY(&vfio_group_list)) {
3835 qemu_register_reset(vfio_reset_handler, NULL);
3838 QLIST_INSERT_HEAD(&vfio_group_list, group, next);
3840 vfio_kvm_device_add_group(group);
3842 return group;
3844 close_fd_exit:
3845 close(group->fd);
3847 free_group_exit:
3848 g_free(group);
3850 return NULL;
3853 static void vfio_put_group(VFIOGroup *group)
3855 if (!QLIST_EMPTY(&group->device_list)) {
3856 return;
3859 vfio_kvm_device_del_group(group);
3860 vfio_disconnect_container(group);
3861 QLIST_REMOVE(group, next);
3862 trace_vfio_put_group(group->fd);
3863 close(group->fd);
3864 g_free(group);
3866 if (QLIST_EMPTY(&vfio_group_list)) {
3867 qemu_unregister_reset(vfio_reset_handler, NULL);
3871 static int vfio_populate_device(VFIODevice *vbasedev)
3873 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
3874 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
3875 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
3876 int i, ret = -1;
3878 /* Sanity check device */
3879 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
3880 error_report("vfio: Um, this isn't a PCI device");
3881 goto error;
3884 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
3885 error_report("vfio: unexpected number of io regions %u",
3886 vbasedev->num_regions);
3887 goto error;
3890 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
3891 error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
3892 goto error;
3895 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
3896 reg_info.index = i;
3898 ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
3899 if (ret) {
3900 error_report("vfio: Error getting region %d info: %m", i);
3901 goto error;
3904 trace_vfio_populate_device_region(vbasedev->name, i,
3905 (unsigned long)reg_info.size,
3906 (unsigned long)reg_info.offset,
3907 (unsigned long)reg_info.flags);
3909 vdev->bars[i].region.vbasedev = vbasedev;
3910 vdev->bars[i].region.flags = reg_info.flags;
3911 vdev->bars[i].region.size = reg_info.size;
3912 vdev->bars[i].region.fd_offset = reg_info.offset;
3913 vdev->bars[i].region.nr = i;
3914 QLIST_INIT(&vdev->bars[i].quirks);
3917 reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;
3919 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
3920 if (ret) {
3921 error_report("vfio: Error getting config info: %m");
3922 goto error;
3925 trace_vfio_populate_device_config(vdev->vbasedev.name,
3926 (unsigned long)reg_info.size,
3927 (unsigned long)reg_info.offset,
3928 (unsigned long)reg_info.flags);
3930 vdev->config_size = reg_info.size;
3931 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
3932 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
3934 vdev->config_offset = reg_info.offset;
3936 if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
3937 vbasedev->num_regions > VFIO_PCI_VGA_REGION_INDEX) {
3938 struct vfio_region_info vga_info = {
3939 .argsz = sizeof(vga_info),
3940 .index = VFIO_PCI_VGA_REGION_INDEX,
3943 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
3944 if (ret) {
3945 error_report(
3946 "vfio: Device does not support requested feature x-vga");
3947 goto error;
3950 if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
3951 !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
3952 vga_info.size < 0xbffff + 1) {
3953 error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
3954 (unsigned long)vga_info.flags,
3955 (unsigned long)vga_info.size);
3956 goto error;
3959 vdev->vga.fd_offset = vga_info.offset;
3960 vdev->vga.fd = vdev->vbasedev.fd;
3962 vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
3963 vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
3964 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);
3966 vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
3967 vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
3968 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);
3970 vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
3971 vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
3972 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);
3974 vdev->has_vga = true;
3976 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
3978 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
3979 if (ret) {
3980 /* This can fail for an old kernel or legacy PCI dev */
3981 trace_vfio_populate_device_get_irq_info_failure();
3982 ret = 0;
3983 } else if (irq_info.count == 1) {
3984 vdev->pci_aer = true;
3985 } else {
3986 error_report("vfio: %s "
3987 "Could not enable error recovery for the device",
3988 vbasedev->name);
3991 error:
3992 return ret;
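/*
 * vfio_get_device() below performs the user-facing half of the VFIO
 * handshake: trade a device name for a device fd, then query its info.
 * A minimal standalone sketch of the same kernel API (group number and
 * device address are hypothetical, error handling elided):
 *
 *   int group_fd = open("/dev/vfio/26", O_RDWR);
 *   int device_fd = ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *   struct vfio_device_info info = { .argsz = sizeof(info) };
 *   ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 *
 * info.num_regions and info.num_irqs bound the GET_REGION_INFO and
 * GET_IRQ_INFO queries issued afterwards by vfio_populate_device().
 */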
static int vfio_get_device(VFIOGroup *group, const char *name,
                           VFIODevice *vbasedev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-<bus> "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vbasedev->fd = ret;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        goto error;
    }

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags,
                          dev_info.num_regions, dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);

    ret = vbasedev->ops->vfio_populate_device(vbasedev);

error:
    if (ret) {
        vfio_put_base_device(vbasedev);
    }
    return ret;
}
void vfio_put_base_device(VFIODevice *vbasedev)
{
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    if (vdev->msix) {
        g_free(vdev->msix);
        vdev->msix = NULL;
    }
    vfio_put_base_device(&vdev->vbasedev);
}
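/*
 * Fatal error handling: the host kernel signals device errors (e.g. AER)
 * by kicking the eventfd registered at VFIO_PCI_ERR_IRQ_INDEX; the handler
 * below runs from the main loop when that eventfd fires.
 */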
static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. "
                 "Please collect any data possible and then kill the guest",
                 __func__, vdev->host.domain, vdev->host.bus,
                 vdev->host.slot, vdev->host.function);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}
/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }

    g_free(irq_set);
}
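/*
 * Both the registration above and the de-registration below pass the same
 * variable-length structure to VFIO_DEVICE_SET_IRQS; only the payload
 * differs (a live eventfd vs. -1 to tear the trigger down):
 *
 *   +------------------------------------------+
 *   | struct vfio_irq_set                      |
 *   |   .argsz = sizeof(*irq_set) + sizeof(fd) |
 *   |   .flags = DATA_EVENTFD | ACTION_TRIGGER |
 *   |   .index = VFIO_PCI_ERR_IRQ_INDEX        |
 *   |   .start = 0, .count = 1                 |
 *   +------------------------------------------+
 *   | int32_t data[]: the eventfd (or -1)      |
 *   +------------------------------------------+
 */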
static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}
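/*
 * Device realization.  In outline: resolve the host device in sysfs,
 * derive its IOMMU group from the iommu_group symlink, attach to that
 * group and fetch the device fd, snapshot config space, then layer the
 * emulated pieces on top (ROM, MSI/MSI-X, BARs, capabilities, INTx and
 * the error notifier).  Any failure unwinds through out_teardown/out_put.
 */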
static int vfio_initfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    vdev->vbasedev.ops = &vfio_pci_ops;

    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.name = g_strdup_printf("%04x:%02x:%02x.%01x",
                                          vdev->host.domain, vdev->host.bus,
                                          vdev->host.slot, vdev->host.function);

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, sizeof(path));
    if (len <= 0 || len >= sizeof(path)) {
        error_report("vfio: error no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    trace_vfio_initfn(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, &vdev->vbasedev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    vfio_register_err_notifier(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    vfio_put_device(vdev);
    vfio_put_group(group);
}
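/*
 * Reset strategy, tried in order of preference: a function-level reset
 * via VFIO_DEVICE_RESET when FLR is available (or PM reset is not), then
 * a hot reset of the bus, and finally a PM reset as the last resort.
 */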
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}
static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
}
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_INT32("bootindex", VFIOPCIDevice, bootindex, -1),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
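/*
 * The "host" property above selects the physical function to assign; a
 * typical invocation (the device address is only an example):
 *
 *   -device vfio-pci,host=0000:06:0d.0
 */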
static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}
static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)
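/*
 * The two helpers below let platform code (in practice the sPAPR TCE
 * IOMMU path, judging from the whitelist in vfio_container_ioctl) issue
 * a vetted ioctl against the container backing a given group, taking and
 * dropping a group reference around the call.
 */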
static int vfio_container_do_ioctl(AddressSpace *as, int32_t groupid,
                                   int req, void *param)
{
    VFIOGroup *group;
    VFIOContainer *container;
    int ret = -1;

    group = vfio_get_group(groupid, as);
    if (!group) {
        error_report("vfio: group %d not registered", groupid);
        return ret;
    }

    container = group->container;
    if (container) {
        ret = ioctl(container->fd, req, param);
        if (ret < 0) {
            error_report("vfio: failed to ioctl container: ret=%d, %s",
                         ret, strerror(errno));
        }
    }

    vfio_put_group(group);

    return ret;
}
int vfio_container_ioctl(AddressSpace *as, int32_t groupid,
                         int req, void *param)
{
    /* We allow only certain ioctls to the container */
    switch (req) {
    case VFIO_CHECK_EXTENSION:
    case VFIO_IOMMU_SPAPR_TCE_GET_INFO:
        break;
    default:
        /* Return an error on unknown requests */
        error_report("vfio: unsupported ioctl %X", req);
        return -1;
    }

    return vfio_container_do_ioctl(as, groupid, req, param);
}
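/*
 * A caller sketch for the whitelist above (field names follow the kernel's
 * struct vfio_iommu_spapr_tce_info; the address space argument is whatever
 * the caller's DMA window lives in):
 *
 *   struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *   if (vfio_container_ioctl(&address_space_memory, groupid,
 *                            VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info) == 0) {
 *       // info.dma32_window_start / info.dma32_window_size describe the
 *       // 32-bit DMA window of the group's container.
 *   }
 */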