/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1
typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;
typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;
typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;
typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;
typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};
typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            MemoryListener listener; /* Used by type1 iommu */
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;
typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    EventNotifier err_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    uint8_t msi_cap_size;
    bool has_vga;
} VFIODevice;
typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;
#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);

static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
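/*
 * Note on VFIO_DEVICE_SET_IRQS: the ioctl takes a variable-length argument.
 * With DATA_NONE the header alone suffices, as above; with DATA_EVENTFD an
 * array of int32_t file descriptors follows the header and argsz covers
 * header plus payload.  A minimal sketch of wiring one eventfd to an IRQ
 * index (error handling omitted):
 *
 *   int argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
 *   struct vfio_irq_set *set = g_malloc0(argsz);
 *   set->argsz = argsz;
 *   set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *   set->index = index;
 *   set->start = 0;
 *   set->count = 1;
 *   *(int32_t *)&set->data = fd;
 *   ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, set);
 *   g_free(set);
 *
 * The interrupt enable paths below all follow this pattern.
 */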
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
    }
}
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_unmask_intx(vdev);
}
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let it rip */
    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}
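/*
 * The resample flow above, in short: the device interrupt fires the trigger
 * eventfd, KVM injects the level IRQ into the guest and masks the line;
 * when the guest EOIs, KVM signals the resample eventfd, which VFIO
 * consumes to unmask the physical interrupt.  QEMU stays entirely out of
 * the hot path once this is wired up.
 */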
static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}
static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        /* Don't dereference pfd here; irq_set has already been freed. */
        qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                            NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}
static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    qemu_del_timer(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1;
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
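/*
 * As we understand the kernel interface, an fd of -1 in the eventfd array
 * tells vfio-pci to skip or de-assign the trigger for that vector: the
 * vector stays allocated on the host but has nothing to signal, which is
 * how unused vectors are represented here until a later SET_IRQS fills
 * them in.
 */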
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       NULL, vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            handler, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut them all down and
     * incrementally increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        struct vfio_irq_set *irq_set;
        int argsz;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}
static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        MSIMessage msg;
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           NULL, vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                                  &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}
static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}
static void vfio_disable_msix(VFIODevice *vdev)
{
    msix_unset_vector_notifiers(&vdev->pdev);

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}
static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}
static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
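/*
 * These ops back the "slow" BAR MemoryRegion: any guest access that isn't
 * satisfied by the mmap'd sub-region overlaid on top (see vfio_mmap_bar()
 * near the end of this file) traps into vfio_bar_read/write, which bounce
 * through pread/pwrite on the BAR's offset within the device fd.
 */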
static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}
static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}
static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2)
{
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}
static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar],
                             addr + quirk->data.base_offset, size);
    }

    return data;
}
static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr, data, size);
        return;
    }

    vfio_bar_write(&vdev->bars[quirk->data.bar],
                   addr + quirk->data.base_offset, data, size);
}
static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
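/*
 * The "window" quirk above models an address/data register pair: a write
 * to the address register that matches address_match (under address_mask)
 * arms the quirk and latches address_val, after which data register
 * accesses are redirected to the emulated PCI config space instead of the
 * device.  Anything else passes straight through to the real BAR.
 */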
static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr + base, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar], addr + base, size);
    }

    return data;
}
static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr + base, data, size);
    } else {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}
static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 BAR4 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access is provided, but we drop writes when the window
 * is enabled to config space nonetheless.
 */
static void vfio_probe_ati_bar4_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.base_offset,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR4 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk,
                          quirk, "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match &
                                        TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR2 0x4000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window, but it doesn't hurt to leave it.
 */
enum {
    NV_3D0_NONE = 0,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};
static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + quirk->data.base_offset, size);

    if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
        data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
        DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
    }

    quirk->data.flags = NV_3D0_NONE;

    return data;
}
static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data.flags) {
    case NV_3D0_NONE:
        if (addr == quirk->data.address_offset && data == 0x338) {
            quirk->data.flags = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset &&
            (data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags = NV_3D0_WINDOW;
            quirk->data.address_val = data & quirk->data.address_mask;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.address_offset) {
            if (data == 0x538) {
                quirk->data.flags = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data.flags = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset) {
            vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
            DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
            return;
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + quirk->data.base_offset, data, size);
}
static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.base_offset = 0x10;
    quirk->data.address_offset = 4;
    quirk->data.address_size = 2;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.data_offset = 0;
    quirk->data.data_size = 4;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          quirk, "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                quirk->data.base_offset, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800 but all of PCI config
 * space, including extended space, which is available in the 4k @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};
static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;

    switch (addr) {
    case 0x0:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_MASTER;
        } else {
            quirk->data.flags &= ~NV_BAR5_MASTER;
        }
        break;
    case 0x4:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_ENABLE;
        } else {
            quirk->data.flags &= ~NV_BAR5_ENABLE;
        }
        break;
    case 0x8:
        if (quirk->data.flags & NV_BAR5_MASTER) {
            if ((data & ~0xfff) == 0x88000) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xfff;
            } else if ((data & ~0xff) == 0x1800) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xff;
            } else {
                quirk->data.flags &= ~NV_BAR5_ADDRESS;
            }
        }
        break;
    }

    vfio_generic_window_quirk_write(opaque, addr, data, size);
}

static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_nvidia_bar5_window_quirk_write,
    .valid.min_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
    quirk->data.address_offset = 0x8;
    quirk->data.address_size = 0; /* actually 4, but avoids generic code */
    quirk->data.data_offset = 0xc;
    quirk->data.data_size = 4;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_nvidia_bar5_window_quirk, quirk,
                          "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirk at a page granularity or else they don't seem to work when
 *      BARs are mmap'd.
 *
 * Here's offset 0x88000...
 */
static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x88000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk,
                          quirk, "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match &
                                        TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * And here's the same for BAR0 offset 0x1800...
 */
static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    /* Log the chipset ID */
    DPRINTF("Nvidia NV%02x\n",
            (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff);

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk,
                          quirk, "vfio-nvidia-bar0-1800-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match &
                                        TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

/*
 * Common quirk probe entry points.
 */
static void vfio_vga_quirk_setup(VFIODevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}
static void vfio_vga_quirk_teardown(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
            memory_region_destroy(&quirk->mem);
            QLIST_REMOVE(quirk, next);
            g_free(quirk);
        }
    }
}

static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr)
{
    vfio_probe_ati_bar4_window_quirk(vdev, nr);
    vfio_probe_ati_bar2_4000_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
}

static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
        memory_region_del_subregion(&bar->mem, &quirk->mem);
        memory_region_destroy(&quirk->mem);
        QLIST_REMOVE(quirk, next);
        g_free(quirk);
    }
}
*pdev
, uint32_t addr
, int len
)
1779 VFIODevice
*vdev
= DO_UPCAST(VFIODevice
, pdev
, pdev
);
1780 uint32_t emu_bits
= 0, emu_val
= 0, phys_val
= 0, val
;
1782 memcpy(&emu_bits
, vdev
->emulated_config_bits
+ addr
, len
);
1783 emu_bits
= le32_to_cpu(emu_bits
);
1786 emu_val
= pci_default_read_config(pdev
, addr
, len
);
1789 if (~emu_bits
& (0xffffffffU
>> (32 - len
* 8))) {
1792 ret
= pread(vdev
->fd
, &phys_val
, len
, vdev
->config_offset
+ addr
);
1794 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
1795 __func__
, vdev
->host
.domain
, vdev
->host
.bus
,
1796 vdev
->host
.slot
, vdev
->host
.function
, addr
, len
);
1799 phys_val
= le32_to_cpu(phys_val
);
1802 val
= (emu_val
& emu_bits
) | (phys_val
& ~emu_bits
);
1804 DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__
,
1805 vdev
->host
.domain
, vdev
->host
.bus
, vdev
->host
.slot
,
1806 vdev
->host
.function
, addr
, len
, val
);
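/*
 * Worked example of the merge above: for a 2-byte read at an offset where
 * only the low byte is emulated, emu_bits is 0x00ff, so the result combines
 * the QEMU-emulated low byte (emu_val & emu_bits) with the physical
 * device's high byte (phys_val & ~emu_bits) in a single value.
 */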
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msi(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msi(vdev);
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}
*container
, hwaddr iova
,
1883 ram_addr_t size
, void *vaddr
, bool readonly
)
1885 struct vfio_iommu_type1_dma_map map
= {
1886 .argsz
= sizeof(map
),
1887 .flags
= VFIO_DMA_MAP_FLAG_READ
,
1888 .vaddr
= (__u64
)(uintptr_t)vaddr
,
1894 map
.flags
|= VFIO_DMA_MAP_FLAG_WRITE
;
1898 * Try the mapping, if it fails with EBUSY, unmap the region and try
1899 * again. This shouldn't be necessary, but we sometimes see it in
1900 * the the VGA ROM space.
1902 if (ioctl(container
->fd
, VFIO_IOMMU_MAP_DMA
, &map
) == 0 ||
1903 (errno
== EBUSY
&& vfio_dma_unmap(container
, iova
, size
) == 0 &&
1904 ioctl(container
->fd
, VFIO_IOMMU_MAP_DMA
, &map
) == 0)) {
1908 DPRINTF("VFIO_MAP_DMA: %d\n", -errno
);
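/*
 * The type1 model pins and maps guest-physical (iova) to process-virtual
 * (vaddr) ranges wholesale.  For example, mapping 1MB of guest RAM at
 * GPA 0x100000 whose backing QEMU address is p is just:
 *
 *   vfio_dma_map(container, 0x100000, 0x100000, p, false);
 *
 * The MemoryListener callbacks below generate exactly these calls as
 * guest memory regions appear and disappear.
 */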
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr);
}
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    void *vaddr;
    int ret;

    assert(!memory_region_is_iommu(section->mr));

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(section->size) - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space +
           int128_get64(section->size)) & TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    memory_region_ref(section->mr);
    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(section->size) - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space +
           int128_get64(section->size)) & TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}
static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.listener);
}
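/*
 * In the container setup path (not part of this excerpt), the listener is
 * presumably registered along these lines:
 *
 *   container->iommu_data.listener = vfio_memory_listener;
 *   memory_listener_register(&container->iommu_data.listener,
 *                            &address_space_memory);
 *
 * after which region_add/region_del fire for every RAM section in the
 * guest address space, keeping the IOMMU mappings in sync.
 */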
/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}

static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    int pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}

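/*
 * Illustrative example (not from the original source): the table and PBA
 * dwords read above encode a BAR indicator in their low 3 bits
 * (PCI_MSIX_FLAGS_BIRMASK) and an offset in the remaining bits.  A
 * hypothetical table dword of 0x00002003 therefore places the vector
 * table at offset 0x2000 of BAR 3.
 */
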
static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    return 0;
}

static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}

static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}

static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }

    memory_region_destroy(&bar->mem);
}

static int vfio_mmap_bar(VFIODevice *vdev, VFIOBAR *bar,
                         MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, OBJECT(vdev), name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, OBJECT(vdev), name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}

static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops,
                          bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & TARGET_PAGE_MASK;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(vdev, bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(vdev, bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow", name);
        }
    }

    vfio_bar_quirk_setup(vdev, nr);
}

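/*
 * Illustrative example (not from the original source): assuming 4k target
 * pages, a hypothetical 64k BAR with a 16-entry MSI-X table at offset
 * 0x3000 ends up laid out by the code above as:
 *
 *   0x0000  +------------------+  "... mmap" subregion, direct mapped
 *   0x3000  +------------------+  no mmap overlay; vector table and PBA
 *           |                  |  accesses trap through vfio_bar_ops
 *   0x4000  +------------------+  "... msix-hi" subregion, direct mapped
 *   0x10000 +------------------+
 *
 * 0x3000 is already page aligned, and the table end (0x3000 + 16 *
 * PCI_MSIX_ENTRY_SIZE = 0x3100) rounds up to the next page at 0x4000.
 */
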
static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }

    if (vdev->has_vga) {
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);

        pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
        vfio_vga_quirk_setup(vdev);
    }
}

static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_teardown(vdev);
        pci_unregister_vga(&vdev->pdev);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
    }
}

static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

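/*
 * Illustrative example (not from the original source): with a hypothetical
 * capability chain 0x50 -> 0x60 -> 0x70 -> end, vfio_std_cap_max_size(pdev,
 * 0x60) finds 0x70 as the closest higher capability and returns 0x10, i.e.
 * the capability at 0x60 is assumed to extend up to, but not into, 0x70.
 * For the last capability, next stays 0xff and the returned size runs to
 * the end of standard config space.
 */
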
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {
        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus.  It doesn't make much
         * sense to even expose it, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it.  We'll need
         * to revisit this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

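/*
 * Illustrative note (not from the original source): the express flags word
 * keeps the device/port type in bits 7:4, which is why the conversions
 * above shift PCI_EXP_TYPE_* values left by 4 and mask with
 * PCI_EXP_FLAGS_TYPE.  E.g. for a hypothetical v2 capability, rewriting an
 * Endpoint (type 0x0) seen on a root bus as a Root Complex Integrated
 * Endpoint (type 0x9) changes the flags word from 0x0002 to 0x0092.
 */
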
static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vdev->pm_cap = pos;
        /* fall through */
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

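/*
 * Illustrative example (not from the original source): because
 * pci_add_capability() prepends, the recursion above re-adds capabilities
 * in reverse discovery order.  For a hypothetical physical chain
 * 0x50 -> 0x60 -> 0x70, we recurse down to 0x70 and add it first, then
 * 0x60 (head becomes 0x60 -> 0x70), then 0x50, so the rebuilt list reads
 * 0x50 -> 0x60 -> 0x70 and matches the physical device.
 */
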
static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}

static int vfio_load_rom(VFIODevice *vdev)
{
    uint64_t size = vdev->rom_size;
    char name[32];
    off_t off = 0, voff = vdev->rom_offset;
    ssize_t bytes;
    void *ptr;

    /* If loading ROM from file, pci handles it */
    if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
        return 0;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    memory_region_init_ram(&vdev->pdev.rom, OBJECT(vdev), name, size);
    ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
    memset(ptr, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, ptr + off, size, voff + off);
        if (bytes == 0) {
            break; /* expect that we could get back less than the ROM BAR */
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            memory_region_destroy(&vdev->pdev.rom);
            return -errno;
        }
    }

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
    vdev->pdev.has_rom = true;
    return 0;
}

static int vfio_connect_container(VFIOGroup *group)
{
    VFIOContainer *container;
    int ret, fd;

    if (group->container) {
        return 0;
    }

    QLIST_FOREACH(container, &container_list, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        return -errno;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        close(fd);
        return -EINVAL;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;

    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        container->iommu_data.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.listener,
                                 &address_space_memory);
    } else {
        error_report("vfio: No available IOMMU models");
        g_free(container);
        close(fd);
        return -EINVAL;
    }

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&container_list, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
}

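/*
 * Illustrative note (not from the original source): the kernel handshake
 * performed above is the same one any VFIO userspace driver does:
 *
 *   container = open("/dev/vfio/vfio", O_RDWR);
 *   ioctl(container, VFIO_GET_API_VERSION);              // VFIO_API_VERSION
 *   ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU);
 *   ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container);
 *   ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 * after which DMA mapping ioctls become valid on the container fd, as
 * used by vfio_dma_map()/vfio_dma_unmap() from the memory listener.
 */
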
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        DPRINTF("vfio_disconnect_container: close container->fd\n");
        close(container->fd);
        g_free(container);
    }
}

static VFIOGroup *vfio_get_group(int groupid)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            return group;
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        g_free(group);
        return NULL;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    return group;
}

static void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    DPRINTF("vfio_put_group: close group->fd\n");
    close(group->fd);
    g_free(group);
}

static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        ret = -EINVAL;
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    if (!vdev->reset_works) {
        error_report("Warning, device %s does not support reset", name);
    }

    if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     dev_info.num_regions);
        ret = -EINVAL;
        goto error;
    }

    if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
        ret = -EINVAL;
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
        QLIST_INIT(&vdev->bars[i].quirks);
    }

    reg_info.index = VFIO_PCI_ROM_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting ROM info: %m");
        goto error;
    }

    DPRINTF("Device %s ROM:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        struct vfio_region_info vga_info = {
            .argsz = sizeof(vga_info),
            .index = VFIO_PCI_VGA_REGION_INDEX,
        };

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            vga_info.size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)vga_info.flags,
                         (unsigned long)vga_info.size);
            ret = -EINVAL;
            goto error;
        }

        vdev->vga.fd_offset = vga_info.offset;
        vdev->vga.fd = vdev->fd;

        vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);

        vdev->has_vga = true;
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        DPRINTF("VFIO_DEVICE_GET_IRQ_INFO failure ret=%d\n", ret);
        ret = 0;
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        error_report("vfio: Warning: "
                     "Could not enable error recovery for the device");
    }

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }

    return ret;
}

static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
    g_free(vdev->msix);
    vdev->msix = NULL;
}

static void vfio_err_notifier_handler(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected.  "
                 "Please collect any data possible and then kill the guest",
                 __func__, vdev->host.domain, vdev->host.bus,
                 vdev->host.slot, vdev->host.function);

    vm_stop(RUN_STATE_IO_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIODevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Warning: "
                     "Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
    g_free(irq_set);
}

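/*
 * Illustrative note (not from the original source): the variable-length
 * vfio_irq_set built above looks like this in memory for a single eventfd
 * trigger:
 *
 *   struct vfio_irq_set {
 *       .argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t),
 *       .flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER,
 *       .index = VFIO_PCI_ERR_IRQ_INDEX,
 *       .start = 0,
 *       .count = 1,
 *       .data  = { <fd from event_notifier_get_fd()> },
 *   };
 *
 * Writing -1 as the eventfd, as the unregister path below does, tears the
 * trigger back down.
 */
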
static void vfio_unregister_err_notifier(VFIODevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %d", ret);
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, sizeof(iommu_group_path) - 1);
    if (len <= 0) {
        error_report("vfio: error no iommu_group for device");
        return -errno;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_load_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_teardown;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);
    vfio_register_err_notifier(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        qemu_free_timer(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
}

static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint16_t cmd;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);

    if (vdev->reset_works) {
        if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
            error_report("vfio: Error unable to reset physical device "
                         "(%04x:%02x:%02x.%x): %m", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
    }

    vfio_enable_intx(vdev);
}

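/*
 * Illustrative note (not from the original source): PCI_PM_CTRL_STATE_MASK
 * covers the two PowerState bits of PMCSR, so `state` above decodes
 * directly to D0..D3hot.  E.g. a hypothetical pmcsr of 0x0003 means D3hot;
 * writing it back with those bits cleared requests D0, and the follow-up
 * read verifies the transition actually took effect.
 */
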
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIODevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}

static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)