/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1
typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    uint32_t data;
    uint32_t data2;
} VFIOQuirk;
typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;
typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;
typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;
typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;
typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};
typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        MemoryListener listener; /* Used by type1 iommu */
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;
typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    bool has_vga;
} VFIODevice;
typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);
static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
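/*
 * For reference, a sketch of the VFIO_DEVICE_SET_IRQS argument from the
 * linux/vfio.h uAPI (the installed header is authoritative):
 *
 *   struct vfio_irq_set {
 *       __u32 argsz;    total size, including the data[] payload
 *       __u32 flags;    VFIO_IRQ_SET_DATA_* | VFIO_IRQ_SET_ACTION_*
 *       __u32 index;    e.g. VFIO_PCI_INTX_IRQ_INDEX, VFIO_PCI_MSI_IRQ_INDEX
 *       __u32 start;    first vector affected
 *       __u32 count;    number of vectors
 *       __u8  data[];   int32_t eventfds when DATA_EVENTFD is set
 *   };
 *
 * The helpers below all follow this pattern, varying only the flags/index
 * and the data payload.
 */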
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
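/*
 * For example (hypothetical host address, shown only to illustrate the
 * option syntax):
 *
 *   -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=0
 */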
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
    }
}
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_unmask_intx(vdev);
}
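/*
 * Sketch of the KVM-accelerated INTx path configured below, per the
 * KVM_CAP_IRQFD_RESAMPLE contract:
 *
 *   device interrupt -> intx.interrupt eventfd -> KVM irqfd -> guest
 *   guest EOI -> KVM signals intx.unmask eventfd -> VFIO ACTION_UNMASK
 *
 * Once established, interrupts flow without bouncing through QEMU; the
 * userspace handler is only re-attached on failure or a route change.
 */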
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}
static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}
static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}
static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    qemu_del_timer(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1;
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
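/*
 * Illustrative payload: with nr_vectors = 3 and only vector 2 in use,
 * data[] holds { -1, -1, fd2 }; -1 leaves that vector without a trigger
 * while keeping the index enabled.
 */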
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            handler, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}
static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        MSIMessage msg;
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}
static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}
static void vfio_disable_msix(VFIODevice *vdev)
{
    msix_unset_vector_notifiers(&vdev->pdev);

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}
static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}

static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}
static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Device specific quirks
 */

#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Device 1002:68f9 (Advanced Micro Devices [AMD] nee ATI Cedar PRO [Radeon
 * HD 5450/6350]) reports the upper byte of the physical address of the
 * I/O port BAR4 through VGA register 0x3c3.  The BAR is 256 bytes, so the
 * lower byte is known to be zero.  Probing for this quirk reads 0xff from
 * port 0x3c3 on some devices so we store the physical address and replace
 * reads with the virtual address any time it matches.  XXX Research when
 * to enable the quirk.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + 0x3, size);

    if (data == quirk->data) {
        data = pci_get_byte(pdev->config + PCI_BASE_ADDRESS_4 + 1);
        DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);
    }

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_4;
    uint32_t physbar;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI ||
        vdev->bars[4].size < 256) {
        return;
    }

    /* Get I/O port BAR physical address */
    if (pread(vdev->fd, &physbar, 4, physoffset) != 4) {
        error_report("vfio: probe failed for ATI/AMD 0x3c3 quirk on device "
                     "%04x:%02x:%02x.%x", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function);
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data = (physbar >> 8) & 0xff;

    memory_region_init_io(&quirk->mem, NULL, &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, 3,
                                &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Device 1002:68f9 (Advanced Micro Devices [AMD] nee ATI Cedar PRO [Radeon
 * HD 5450/6350]) reports the physical address of MMIO BAR0 through a
 * write/read operation on I/O port BAR4.  When uint32_t 0x4010 is written
 * to offset 0x0, the subsequent read from offset 0x4 returns the contents
 * of BAR0.  Test for this quirk on all ATI/AMD devices.  XXX - Note that
 * 0x10 is the offset of BAR0 in config space; is this a window to all of
 * config space?
 */
static uint64_t vfio_ati_4010_quirk_read(void *opaque,
                                         hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_bar_read(&vdev->bars[4], addr, size);

    if (addr == 4 && size == 4 && quirk->data) {
        data = pci_get_long(pdev->config + PCI_BASE_ADDRESS_0);
        DPRINTF("%s(BAR4+0x4) = 0x%"PRIx64"\n", __func__, data);
    }

    return data;
}

static void vfio_ati_4010_quirk_write(void *opaque, hwaddr addr,
                                      uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    vfio_bar_write(&vdev->bars[4], addr, data, size);

    quirk->data = (addr == 0 && size == 4 && data == 0x4010) ? 1 : 0;
}

static const MemoryRegionOps vfio_ati_4010_quirk = {
    .read = vfio_ati_4010_quirk_read,
    .write = vfio_ati_4010_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_ati_4010_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_0;
    uint32_t physbar0;
    uint64_t data;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 || !vdev->bars[0].size ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /* Get I/O port BAR physical address */
    if (pread(vdev->fd, &physbar0, 4, physoffset) != 4) {
        error_report("vfio: probe failed for ATI/AMD 0x4010 quirk on device "
                     "%04x:%02x:%02x.%x", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function);
        return;
    }

    /* Write 0x4010 to I/O port BAR offset 0 */
    vfio_bar_write(&vdev->bars[4], 0, 0x4010, 4);
    /* Read back result */
    data = vfio_bar_read(&vdev->bars[4], 4, 4);

    /* If the register matches the physical address of BAR0, we need a quirk */
    if (data != physbar0) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, NULL, &vfio_ati_4010_quirk, quirk,
                          "vfio-ati-4010-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x4010 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Device 1002:5b63 (Advanced Micro Devices [AMD] nee ATI RV370 [Radeon X550])
 * retrieves the upper half of the MMIO BAR0 physical address by writing
 * 0xf10 to I/O port BAR1 offset 0 and reading the result from offset 6.
 * XXX - 0x10 is the offset of BAR0 in PCI config space, so this could provide
 * full access to config space.  Config space is little endian, so the data
 * register probably starts at 0x4.
 */
static uint64_t vfio_ati_f10_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_bar_read(&vdev->bars[1], addr, size);

    if (addr == 6 && size == 2 && quirk->data) {
        data = pci_get_word(pdev->config + PCI_BASE_ADDRESS_0 + 2);
        DPRINTF("%s(BAR1+0x6) = 0x%"PRIx64"\n", __func__, data);
    }

    return data;
}

static void vfio_ati_f10_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    vfio_bar_write(&vdev->bars[1], addr, data, size);

    quirk->data = (addr == 0 && size == 4 && data == 0xf10) ? 1 : 0;
}

static const MemoryRegionOps vfio_ati_f10_quirk = {
    .read = vfio_ati_f10_quirk_read,
    .write = vfio_ati_f10_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_ati_f10_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_0;
    uint32_t physbar0;
    uint64_t data;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 1 || !vdev->bars[0].size ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /* Get I/O port BAR physical address */
    if (pread(vdev->fd, &physbar0, 4, physoffset) != 4) {
        error_report("vfio: probe failed for ATI/AMD 0xf10 quirk on device "
                     "%04x:%02x:%02x.%x", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function);
        return;
    }

    vfio_bar_write(&vdev->bars[1], 0, 0xf10, 4);
    data = vfio_bar_read(&vdev->bars[1], 0x6, 2);

    /* If the register matches the physical address of BAR0, we need a quirk */
    if (data != (le32_to_cpu(physbar0) >> 16)) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, NULL, &vfio_ati_f10_quirk, quirk,
                          "vfio-ati-f10-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0xf10 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
 */
enum {
    NV_3D0_NONE = 0,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};
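/*
 * Illustrative guest sequence for the backdoor described above: write
 * 0x338 to 0x3d4, write 0x1804 to 0x3d0 (select config offset 0x04),
 * write 0x538 to 0x3d4, then read the value back from 0x3d0.
 */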
static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + 0x10, size);

    if (quirk->data == NV_3D0_READ && addr == 0) {
        data = vfio_pci_read_config(pdev, quirk->data2, size);
        DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
    }

    quirk->data = NV_3D0_NONE;

    return data;
}

static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data) {
    case NV_3D0_NONE:
        if (addr == 4 && data == 0x338) {
            quirk->data = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data = NV_3D0_NONE;
        if (addr == 0 && (data & ~0xff) == 0x1800) {
            quirk->data = NV_3D0_WINDOW;
            quirk->data2 = data & 0xff;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data = NV_3D0_NONE;
        if (addr == 4) {
            if (data == 0x538) {
                quirk->data = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data = NV_3D0_NONE;
        if (addr == 0) {
            vfio_pci_write_config(pdev, quirk->data2, data, size);
            DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
            return;
        }
        break;
    case NV_3D0_READ:
        quirk->data = NV_3D0_NONE;
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + 0x10, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, NULL, &vfio_nvidia_3d0_quirk, quirk,
                          "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                0x10, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
 * space, including extended space, which is available at the 4k window
 * @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};
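/*
 * Illustrative sequence accepted by the state machine below: write 1 to
 * BAR5+0x0 (master) and to BAR5+0x4 (enable), write 0x1842 to BAR5+0x8
 * to select config offset 0x42, then access the data port at BAR5+0xc.
 */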
static uint64_t vfio_nvidia_bar5_window_quirk_read(void *opaque,
                                                   hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_bar_read(&vdev->bars[5], addr, size);

    if (addr == 0xc && quirk->data == NV_BAR5_VALID) {
        data = vfio_pci_read_config(&vdev->pdev, quirk->data2, size);
        DPRINTF("%s(%04x:%02x:%02x.%x:BAR5+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr, size, data);
    }

    return data;
}

static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    /*
     * Use quirk->data to track enables and quirk->data2 for the offset
     */
    switch (addr) {
    case 0x0:
        if (data & 0x1) {
            quirk->data |= NV_BAR5_MASTER;
        } else {
            quirk->data &= ~NV_BAR5_MASTER;
        }
        break;
    case 0x4:
        if (data & 0x1) {
            quirk->data |= NV_BAR5_ENABLE;
        } else {
            quirk->data &= ~NV_BAR5_ENABLE;
        }
        break;
    case 0x8:
        if (quirk->data & NV_BAR5_MASTER) {
            if ((data & ~0xfff) == 0x88000) {
                quirk->data |= NV_BAR5_ADDRESS;
                quirk->data2 = data & 0xfff;
            } else if ((data & ~0xff) == 0x1800) {
                quirk->data |= NV_BAR5_ADDRESS;
                quirk->data2 = data & 0xff;
            } else {
                quirk->data &= ~NV_BAR5_ADDRESS;
            }
        }
        break;
    case 0xc:
        if (quirk->data == NV_BAR5_VALID) {
            vfio_pci_write_config(&vdev->pdev, quirk->data2, data, size);
            DPRINTF("%s(%04x:%02x:%02x.%x:BAR5+0x%"HWADDR_PRIx", 0x%"
                    PRIx64", %d)\n", __func__, vdev->host.domain,
                    vdev->host.bus, vdev->host.slot, vdev->host.function,
                    addr, data, size);
            return;
        }
    }

    vfio_bar_write(&vdev->bars[5], addr, data, size);
}

static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
    .read = vfio_nvidia_bar5_window_quirk_read,
    .write = vfio_nvidia_bar5_window_quirk_write,
    .valid.min_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, NULL, &vfio_nvidia_bar5_window_quirk,
                          quirk, "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirk at a page granularity or else they don't seem to work when
 *      BARs are mmap'd
 *
 * Here's offset 0x88000...
 */
static uint64_t vfio_nvidia_bar0_88000_quirk_read(void *opaque,
                                                  hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = 0x88000 & TARGET_PAGE_MASK;
    hwaddr offset = 0x88000 & ~TARGET_PAGE_MASK;
    uint64_t data = vfio_bar_read(&vdev->bars[0], addr + base, size);

    if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr + base, size, data);
    }

    return data;
}
static void vfio_nvidia_bar0_88000_quirk_write(void *opaque, hwaddr addr,
                                               uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = 0x88000 & TARGET_PAGE_MASK;
    hwaddr offset = 0x88000 & ~TARGET_PAGE_MASK;

    if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr + base, data, size);
    }

    vfio_bar_write(&vdev->bars[0], addr + base, data, size);
}

static const MemoryRegionOps vfio_nvidia_bar0_88000_quirk = {
    .read = vfio_nvidia_bar0_88000_quirk_read,
    .write = vfio_nvidia_bar0_88000_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, NULL, &vfio_nvidia_bar0_88000_quirk,
                          quirk, "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(PCIE_CONFIG_SPACE_SIZE));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        0x88000 & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * And here's the same for BAR0 offset 0x1800...
 */
static uint64_t vfio_nvidia_bar0_1800_quirk_read(void *opaque,
                                                 hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = 0x1800 & TARGET_PAGE_MASK;
    hwaddr offset = 0x1800 & ~TARGET_PAGE_MASK;
    uint64_t data = vfio_bar_read(&vdev->bars[0], addr + base, size);

    if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr + base, size, data);
    }

    return data;
}
static void vfio_nvidia_bar0_1800_quirk_write(void *opaque, hwaddr addr,
                                              uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = 0x1800 & TARGET_PAGE_MASK;
    hwaddr offset = 0x1800 & ~TARGET_PAGE_MASK;

    if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr + base, data, size);
    }

    vfio_bar_write(&vdev->bars[0], addr + base, data, size);
}

static const MemoryRegionOps vfio_nvidia_bar0_1800_quirk = {
    .read = vfio_nvidia_bar0_1800_quirk_read,
    .write = vfio_nvidia_bar0_1800_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    /* Log the chipset ID */
    DPRINTF("Nvidia NV%02x\n",
            (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff);

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, NULL, &vfio_nvidia_bar0_1800_quirk,
                          quirk, "vfio-nvidia-bar0-1800-quirk",
                          TARGET_PAGE_ALIGN(PCI_CONFIG_SPACE_SIZE));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        0x1800 & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

/*
 * Common quirk probe entry points.
 */
static void vfio_vga_quirk_setup(VFIODevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}
static void vfio_vga_quirk_teardown(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
            QLIST_REMOVE(quirk, next);
            g_free(quirk);
        }
    }
}
static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr)
{
    vfio_probe_ati_4010_quirk(vdev, nr);
    vfio_probe_ati_f10_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
}
static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
        memory_region_del_subregion(&bar->mem, &quirk->mem);
        QLIST_REMOVE(quirk, next);
        g_free(quirk);
    }
}
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}
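/*
 * Worked example of the merge above: for a 2-byte read with a hypothetical
 * emu_bits of 0x00ff, byte 0 comes from QEMU's emulated config space and
 * byte 1 from the physical device:
 *
 *   val = (emu_val & 0x00ff) | (phys_val & ~0x00ff);
 */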
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msi(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msi(vdev);
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}
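/*
 * For reference, a sketch of the type1 map argument from the linux/vfio.h
 * uAPI (the installed header is authoritative):
 *
 *   struct vfio_iommu_type1_dma_map {
 *       __u32 argsz;
 *       __u32 flags;   VFIO_DMA_MAP_FLAG_READ / VFIO_DMA_MAP_FLAG_WRITE
 *       __u64 vaddr;   process virtual address to pin
 *       __u64 iova;    I/O virtual address seen by the device
 *       __u64 size;    bytes, page aligned
 *   };
 */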
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr);
}
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    void *vaddr;
    int ret;

    assert(!memory_region_is_iommu(section->mr));

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(section->size) - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(section->size) - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}
static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.listener);
}
/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}
static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}
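
/*
 * Worked example for the decode above, assuming a hypothetical table dword
 * of 0x00002003: table_bar = 0x2003 & PCI_MSIX_FLAGS_BIRMASK = 3 and
 * table_offset = 0x2003 & ~PCI_MSIX_FLAGS_BIRMASK = 0x2000, i.e. the vector
 * table lives in BAR 3 at offset 0x2000.  The PBA dword decodes the same
 * way.
 */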
static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    return 0;
}
static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}
/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}
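
/*
 * Note on usage: this toggle exists mainly for the INTx path.  While an
 * INTx interrupt is pending the direct mappings are disabled, so guest BAR
 * accesses fall back to the trapped ("slow") path, where the access doubles
 * as an EOI indication; intx.mmap_timer then re-enables the mappings after
 * an interrupt-free interval (the x-intx-mmap-timeout-ms property).
 */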
static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }

    memory_region_destroy(&bar->mem);
}
static int vfio_mmap_bar(VFIOBAR *bar, MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, NULL, name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, NULL, name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}
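
/*
 * Design note: the zero-sized fallback region lets callers tear down
 * unconditionally (see vfio_unmap_bar()):
 *
 *     memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
 *     munmap(bar->mmap, memory_region_size(&bar->mmap_mem));
 *
 * A munmap() with length 0 simply fails with EINVAL and has no side
 * effects, so no separate "was this mmapped?" bookkeeping is needed.
 */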
static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    type = pci_bar & (pci_bar & PCI_BASE_ADDRESS_SPACE_IO ?
           ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, NULL, &vfio_bar_ops, bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & TARGET_PAGE_MASK;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow", name);
        }
    }

    vfio_bar_quirk_setup(vdev, nr);
}
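
/*
 * Worked example of the split mapping above (hypothetical 64KB BAR, 4KB
 * TARGET_PAGE_SIZE, 8 MSI-X entries at table_offset 0x3000): the low mmap
 * covers [0x0, 0x3000); the table spans 8 * PCI_MSIX_ENTRY_SIZE = 0x80
 * bytes, so the "msix-hi" mmap starts at TARGET_PAGE_ALIGN(0x3080) = 0x4000
 * and covers [0x4000, 0x10000).  Accesses in between trap to the slow path,
 * which is exactly what VFIO requires for the vector table.
 */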
static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }

    if (vdev->has_vga) {
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem, NULL,
                              &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, NULL,
                              &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, NULL,
                              &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);
        pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
        vfio_vga_quirk_setup(vdev);
    }
}
static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_teardown(vdev);
        pci_unregister_vga(&vdev->pdev);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
    }
}
/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
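
/*
 * Example with a hypothetical chain of capabilities at 0x40, 0x50 and 0x60:
 * vfio_std_cap_max_size(pdev, 0x50) finds 0x60 as the nearest higher
 * neighbor and returns 0x60 - 0x50 = 0x10.  For the last capability there
 * is no higher neighbor, so the size is bounded by the initial next = 0xff
 * instead.
 */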
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
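
/*
 * These helpers keep three parallel views of config space in sync: the
 * value the guest reads (pdev.config), which bits the guest may write
 * (pdev.wmask), and which bits QEMU emulates instead of passing through
 * (emulated_config_bits).  For example, vfio_add_emulated_word(vdev, pos,
 * 0, ~0) makes the word at pos read as zero, read-only, and fully
 * emulated, which is how vfio_setup_pcie_cap() below hides the LNK fields.
 */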
static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus.  It doesn't make much
         * sense to even expose, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it.  We'll need
         * to revisit this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}
static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}
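
/*
 * Example of the recursion above, assuming a hypothetical physical chain
 * 0x40 -> 0x50 -> 0x60: vfio_add_std_cap(0x40) recurses to 0x50 and then
 * 0x60, which has no next pointer and starts the rebuild.  As the stack
 * unwinds, 0x60, then 0x50, then 0x40 are added, so head insertion by
 * pci_add_capability() reproduces the physical ordering.
 */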
static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}
static int vfio_load_rom(VFIODevice *vdev)
{
    uint64_t size = vdev->rom_size;
    char name[32];
    off_t off = 0, voff = vdev->rom_offset;
    ssize_t bytes;
    void *ptr;

    /* If loading ROM from file, pci handles it */
    if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
        return 0;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    memory_region_init_ram(&vdev->pdev.rom, NULL, name, size);
    ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
    memset(ptr, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, ptr + off, size, voff + off);
        if (bytes == 0) {
            break; /* expect that we could get back less than the ROM BAR */
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            memory_region_destroy(&vdev->pdev.rom);
            return -errno;
        }
    }

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
    vdev->pdev.has_rom = true;
    return 0;
}
static int vfio_connect_container(VFIOGroup *group)
{
    VFIOContainer *container;
    int ret, fd;

    if (group->container) {
        return 0;
    }

    QLIST_FOREACH(container, &container_list, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        return -errno;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        close(fd);
        return -EINVAL;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;

    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        container->iommu_data.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.listener, &address_space_memory);
    } else {
        error_report("vfio: No available IOMMU models");
        g_free(container);
        close(fd);
        return -EINVAL;
    }

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&container_list, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
}
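
/*
 * The same handshake reduced to its bare ioctl sequence (sketch only, error
 * handling omitted; see Documentation/vfio.txt in the kernel tree):
 *
 *     int container = open("/dev/vfio/vfio", O_RDWR);
 *     assert(ioctl(container, VFIO_GET_API_VERSION) == VFIO_API_VERSION);
 *     ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container);
 *     ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */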
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        DPRINTF("vfio_disconnect_container: close container->fd\n");
        close(container->fd);
        g_free(container);
    }
}
static VFIOGroup *vfio_get_group(int groupid)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            return group;
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        g_free(group);
        return NULL;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    return group;
}
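
/*
 * A group is only viable once every device in it is bound to a VFIO bus
 * driver.  From the host shell that typically looks like (hypothetical
 * device address and vendor/device IDs):
 *
 *     echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind
 *     echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id
 *
 * after which /dev/vfio/$GROUP appears and can be opened here.
 */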
static void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    DPRINTF("vfio_put_group: close group->fd\n");
    close(group->fd);
    g_free(group);
}
static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    if (!vdev->reset_works) {
        error_report("Warning, device %s does not support reset", name);
    }

    if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     dev_info.num_regions);
        goto error;
    }

    if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
        QLIST_INIT(&vdev->bars[i].quirks);
    }

    reg_info.index = VFIO_PCI_ROM_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting ROM info: %m");
        goto error;
    }

    DPRINTF("Device %s ROM:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        struct vfio_region_info vga_info = {
            .argsz = sizeof(vga_info),
            .index = VFIO_PCI_VGA_REGION_INDEX,
        };

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            vga_info.size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)vga_info.flags,
                         (unsigned long)vga_info.size);
            goto error;
        }

        vdev->vga.fd_offset = vga_info.offset;
        vdev->vga.fd = vdev->fd;

        vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);

        vdev->has_vga = true;
    }

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }
    return ret;
}
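
/*
 * The fixed indices used above come from the vfio-pci UAPI:
 * VFIO_PCI_BAR0_REGION_INDEX through VFIO_PCI_BAR5_REGION_INDEX cover the
 * six BARs and are followed by VFIO_PCI_ROM_REGION_INDEX,
 * VFIO_PCI_CONFIG_REGION_INDEX and, when present,
 * VFIO_PCI_VGA_REGION_INDEX, so one VFIO_DEVICE_GET_REGION_INFO loop can
 * enumerate everything the device exposes.
 */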
static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
}
static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, PATH_MAX);
    if (len <= 0) {
        error_report("vfio: error no iommu_group for device");
        return -errno;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_load_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_teardown;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}
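
/*
 * For reference, the iommu_group discovery at the top of vfio_initfn() is
 * the code equivalent of (hypothetical host address):
 *
 *     $ readlink /sys/bus/pci/devices/0000:06:0d.0/iommu_group
 *     ../../../../kernel/iommu_groups/26
 *
 * basename() of the link target ("26") is the groupid handed to
 * vfio_get_group().
 */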
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        qemu_free_timer(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
}
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint16_t cmd;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);

    if (vdev->reset_works) {
        if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
            error_report("vfio: Error unable to reset physical device "
                         "(%04x:%02x:%02x.%x): %m", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
    }

    vfio_enable_intx(vdev);
}
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIODevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
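
/*
 * These properties surface on the QEMU command line, e.g. (hypothetical
 * host address):
 *
 *     -device vfio-pci,host=0000:06:0d.0,x-intx-mmap-timeout-ms=1100
 *
 * "host" selects the physical device; the x- prefixed options are
 * experimental tuning knobs.
 */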
static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}
static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)