/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
43 /* #define DEBUG_VFIO */
45 #define DPRINTF(fmt, ...) \
46 do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
48 #define DPRINTF(fmt, ...) \
/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1
#define VFIO_ALLOW_KVM_MSI 1
#define VFIO_ALLOW_KVM_MSIX 1
typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;
typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;
typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;
typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;
typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;
typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    MSIMessage msg; /* cache the MSI message so we know when it changes */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;
typedef struct VFIOType1 {
    MemoryListener listener;
} VFIOType1;
typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            VFIOType1 type1;
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;
typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    EventNotifier err_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    bool has_vga;
    bool rom_read_failed;
} VFIODevice;
typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;
typedef struct VFIORomBlacklistEntry {
    uint16_t vendor_id;
    uint16_t device_id;
} VFIORomBlacklistEntry;
/*
 * List of device ids/vendor ids for which to disable
 * option rom loading.  This avoids guest hangs during rom
 * execution, as seen with the BCM 57810 card, for lack of a
 * better way to handle such issues.
 * The user can still override by specifying a romfile or
 * rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang.  When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue.
 */
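/*
 * For example (hypothetical host address), either of these overrides the
 * blacklist from the command line:
 *
 *   -device vfio-pci,host=01:00.0,rombar=1
 *   -device vfio-pci,host=01:00.0,romfile=/path/to/option.rom
 */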
static const VFIORomBlacklistEntry romblacklist[] = {
    /* Broadcom BCM 57810 */
    { 0x14e4, 0x168e }
};

#define MSIX_CAP_LENGTH 12
static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
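/*
 * A minimal sketch of how this fd is typically created (the creating code
 * lives outside this excerpt; it assumes the KVM_CREATE_DEVICE ioctl and
 * KVM_DEV_TYPE_VFIO, so treat it as illustrative rather than definitive):
 *
 *   struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *
 *   if (vfio_kvm_device_fd < 0 &&
 *       kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd) == 0) {
 *       vfio_kvm_device_fd = cd.fd;
 *   }
 */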
static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
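/*
 * The fixed-size vfio_irq_set above works for DATA_NONE actions.  Attaching
 * an eventfd instead requires the variable-length form with the fd appended
 * as payload, a pattern this file repeats for INTx, MSI and MSI-X.  A
 * minimal sketch of that form:
 *
 *   struct vfio_irq_set *irq_set;
 *   int argsz = sizeof(*irq_set) + sizeof(int32_t);
 *
 *   irq_set = g_malloc0(argsz);
 *   irq_set->argsz = argsz;
 *   irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *   irq_set->index = index;
 *   irq_set->start = 0;
 *   irq_set->count = 1;
 *   *(int32_t *)&irq_set->data = fd;    // an fd of -1 detaches the trigger
 *
 *   ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
 *   g_free(irq_set);
 */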
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
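/*
 * For example (hypothetical host address), the timeout can be tuned or
 * disabled per device on the command line:
 *
 *   -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=2000
 *   -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=0    (timer off)
 */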
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_intx(vdev);
}
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}
static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}
static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}
static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

#ifdef DEBUG_VFIO
    MSIMessage msg;

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msg = msix_get_message(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msg = msi_get_message(&vdev->pdev, nr);
    } else {
        abort();
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d 0x%"PRIx64"/0x%x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr, msg.address, msg.data);
#endif

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1;
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = msg && VFIO_ALLOW_KVM_MSIX ?
                   kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       NULL, vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            handler, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}
static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}
static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        vector->msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = VFIO_ALLOW_KVM_MSI ?
                       kvm_irqchip_add_msi_route(kvm_state, vector->msg) : -1;
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           NULL, vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}
static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}
static void vfio_disable_msix(VFIODevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_update_msi(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);

        if (msg.address != vector->msg.address ||
            msg.data != vector->msg.data) {

            DPRINTF("%s(%04x:%02x:%02x.%x) MSI vector %d changed\n",
                    __func__, vdev->host.domain, vdev->host.bus,
                    vdev->host.slot, vdev->host.function, i);

            kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
            vector->msg = msg;
        }
    }
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}
static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}
= {
1128 .read
= vfio_bar_read
,
1129 .write
= vfio_bar_write
,
1130 .endianness
= DEVICE_LITTLE_ENDIAN
,
static void vfio_pci_load_rom(VFIODevice *vdev)
{
    struct vfio_region_info reg_info = {
        .argsz = sizeof(reg_info),
        .index = VFIO_PCI_ROM_REGION_INDEX
    };
    uint64_t size;
    off_t off = 0;
    size_t bytes;

    if (ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    DPRINTF("Device %04x:%02x:%02x.%x ROM:\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%04x:%02x:%02x.%x",
                     vdev->host.domain, vdev->host.bus, vdev->host.slot,
                     vdev->host.function);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, vdev->rom + off, size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }
}
static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIODevice *vdev = opaque;
    uint64_t val = ((uint64_t)1 << (size * 8)) - 1;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    DPRINTF("%s(%04x:%02x:%02x.%x, 0x%"HWADDR_PRIx", 0x%x) = 0x%"PRIx64"\n",
            __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, size, val);

    return val;
}
static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static bool vfio_blacklist_opt_rom(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t vendor_id, device_id;
    int count = 0;

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);

    while (count < ARRAY_SIZE(romblacklist)) {
        if (romblacklist[count].vendor_id == vendor_id &&
            romblacklist[count].device_id == device_id) {
            return true;
        }
        count++;
    }

    return false;
}
static void vfio_pci_size_rom(VFIODevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char name[32];

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning: Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(vdev->fd, &orig, 4, offset) != 4 ||
        pwrite(vdev->fd, &size, 4, offset) != 4 ||
        pread(vdev->fd, &size, 4, offset) != 4 ||
        pwrite(vdev->fd, &orig, 4, offset) != 4) {
        error_report("%s(%04x:%02x:%02x.%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning: Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified a non-zero "
                         "value for rombar\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        } else {
            error_printf("Warning: Rom loading for device at "
                         "%04x:%02x:%02x.%x has been disabled due to "
                         "system instability issues. "
                         "Specify rombar=1 or romfile to force\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
            return;
        }
    }

    DPRINTF("%04x:%02x:%02x.%x ROM size 0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, size);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}
static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}
static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}
static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2) {
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}
static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar],
                             addr + quirk->data.base_offset, size);
    }

    return data;
}
static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr, data, size);

        return;
    }

    vfio_bar_write(&vdev->bars[quirk->data.bar],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr + base, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar], addr + base, size);
    }

    return data;
}
static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr + base, data, size);
    } else {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);

    return data;
}
static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 BAR4 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access is provided, but we drop writes when the window
 * is enabled to config space nonetheless.
 */
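/*
 * Illustrative guest-eye view of the window described above (hypothetical
 * values; BAR4 here stands for the guest's I/O port BAR4 base):
 *
 *   outl(0x4004, BAR4 + 0x0);   // address register: select config offset 0x4
 *   val = inl(BAR4 + 0x4);      // data register: reads PCI config dword 0x4
 *
 * Addresses outside 0x4000-0x4fff pass through to the device untouched.
 */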
static void vfio_probe_ati_bar4_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.base_offset, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR4 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR2 0x4000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de
/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
 */
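/*
 * A sketch of that access sequence from the guest's point of view (matches
 * the state machine implemented below; illustrative only):
 *
 *   outw(0x3d4, 0x338);             // unlock the backdoor
 *   outl(0x3d0, 0x1800 | offset);   // select a config space offset
 *   outw(0x3d4, 0x538);             // arm a read (0x738 arms a write)
 *   val = inl(0x3d0);               // data then flows through 0x3d0
 */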
enum {
    NV_3D0_NONE = 0,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};

static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + quirk->data.base_offset, size);

    if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
        data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
        DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
    }

    quirk->data.flags = NV_3D0_NONE;

    return data;
}
static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data.flags) {
    case NV_3D0_NONE:
        if (addr == quirk->data.address_offset && data == 0x338) {
            quirk->data.flags = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset &&
            (data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags = NV_3D0_WINDOW;
            quirk->data.address_val = data & quirk->data.address_mask;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.address_offset) {
            if (data == 0x538) {
                quirk->data.flags = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data.flags = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset) {
            vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
            DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
            return;
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + quirk->data.base_offset, data, size);
}
static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.base_offset = 0x10;
    quirk->data.address_offset = 4;
    quirk->data.address_size = 2;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.data_offset = 0;
    quirk->data.data_size = 4;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          quirk, "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                quirk->data.base_offset, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to not only trap 256 bytes @0x1800, but all of PCI config
 * space; extended space is available in the 4k window @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};
static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;

    switch (addr) {
    case 0x0:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_MASTER;
        } else {
            quirk->data.flags &= ~NV_BAR5_MASTER;
        }
        break;
    case 0x4:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_ENABLE;
        } else {
            quirk->data.flags &= ~NV_BAR5_ENABLE;
        }
        break;
    case 0x8:
        if (quirk->data.flags & NV_BAR5_MASTER) {
            if ((data & ~0xfff) == 0x88000) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xfff;
            } else if ((data & ~0xff) == 0x1800) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xff;
            } else {
                quirk->data.flags &= ~NV_BAR5_ADDRESS;
            }
        }
        break;
    }

    vfio_generic_window_quirk_write(opaque, addr, data, size);
}
static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_nvidia_bar5_window_quirk_write,
    .valid.min_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
    quirk->data.address_offset = 0x8;
    quirk->data.address_size = 0; /* actually 4, but avoids generic code */
    quirk->data.data_offset = 0xc;
    quirk->data.data_size = 4;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_nvidia_bar5_window_quirk, quirk,
                          "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
static void vfio_nvidia_88000_quirk_write(void *opaque, hwaddr addr,
                                          uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;

    vfio_generic_quirk_write(opaque, addr, data, size);

    /*
     * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
     * MSI capability ID register.  Both the ID and next register are
     * read-only, so we allow writes covering either of those to real hw.
     * NB - only fixed for the 0x88000 MMIO window.
     */
    if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
        vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}
static const MemoryRegionOps vfio_nvidia_88000_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_nvidia_88000_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirk at a page granularity or else they don't seem to work when
 *      BARs are mmap'd.
 *
 * Here's offset 0x88000...
 */
static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x88000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_88000_quirk,
                          quirk, "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * And here's the same for BAR0 offset 0x1800...
 */
static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    /* Log the chipset ID */
    DPRINTF("Nvidia NV%02x\n",
            (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff);

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-nvidia-bar0-1800-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

/*
 * Common quirk probe entry points.
 */
static void vfio_vga_quirk_setup(VFIODevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}
static void vfio_vga_quirk_teardown(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
            memory_region_destroy(&quirk->mem);
            QLIST_REMOVE(quirk, next);
            g_free(quirk);
        }
    }
}
*vdev
, int nr
)
2072 vfio_probe_ati_bar4_window_quirk(vdev
, nr
);
2073 vfio_probe_ati_bar2_4000_quirk(vdev
, nr
);
2074 vfio_probe_nvidia_bar5_window_quirk(vdev
, nr
);
2075 vfio_probe_nvidia_bar0_88000_quirk(vdev
, nr
);
2076 vfio_probe_nvidia_bar0_1800_quirk(vdev
, nr
);
2079 static void vfio_bar_quirk_teardown(VFIODevice
*vdev
, int nr
)
2081 VFIOBAR
*bar
= &vdev
->bars
[nr
];
2083 while (!QLIST_EMPTY(&bar
->quirks
)) {
2084 VFIOQuirk
*quirk
= QLIST_FIRST(&bar
->quirks
);
2085 memory_region_del_subregion(&bar
->mem
, &quirk
->mem
);
2086 memory_region_destroy(&quirk
->mem
);
2087 QLIST_REMOVE(quirk
, next
);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}

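/*
 * Worked example (editor's illustration, not part of the driver): suppose
 * the guest reads 2 bytes at PCI_COMMAND and only PCI_COMMAND_INTX_DISABLE
 * (bit 10) is marked emulated, i.e. emulated_config_bits holds 0x0400 there:
 *
 *     emu_bits = 0x0400;             // from vdev->emulated_config_bits
 *     emu_val  = 0x0400;             // QEMU's view: INTx disabled
 *     phys_val = 0x0007;             // host view: IO | MEM | MASTER
 *     val = (emu_val & emu_bits) | (phys_val & ~emu_bits);  // = 0x0407
 *
 * The guest sees the hardware command bits with only the emulated INTx
 * disable bit overlaid by QEMU.
 */
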
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_enable_msi(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_disable_msi(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

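/*
 * Editor's illustration of the transition handling above: a guest driver
 * enabling MSI writes the message control word with PCI_MSI_FLAGS_ENABLE
 * set.  msi_enabled() is sampled before and after the
 * pci_default_write_config() call, so exactly one action runs:
 *
 *     was_enabled  is_enabled   action
 *     false        true         vfio_enable_msi(vdev)
 *     true         false        vfio_disable_msi(vdev)
 *     true         true         vfio_update_msi(vdev)   // e.g. new address
 */
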
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}

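/*
 * Minimal standalone sketch of the same type1 UAPI (editor's illustration
 * following the kernel's Documentation/vfio.txt; error handling elided,
 * container_fd assumed to already have an IOMMU set via VFIO_SET_IOMMU as
 * in vfio_connect_container() below):
 *
 *     #include <sys/mman.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/vfio.h>
 *
 *     void *buf = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
 *                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     struct vfio_iommu_type1_dma_map map = {
 *         .argsz = sizeof(map),
 *         .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *         .vaddr = (__u64)(uintptr_t)buf,
 *         .iova  = 0x100000,          // IOVA is chosen by the caller
 *         .size  = 1024 * 1024,
 *     };
 *     ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 */
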
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.type1.listener);
    hwaddr iova, end;
    void *vaddr;
    int ret;

    assert(!memory_region_is_iommu(section->mr));

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space +
           int128_get64(section->size)) & TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    memory_region_ref(section->mr);
    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);

        /*
         * On the initfn path, store the first error in the container so we
         * can gracefully fail.  Runtime, there's not much we can do other
         * than throw a hardware error.
         */
        if (!container->iommu_data.type1.initialized) {
            if (!container->iommu_data.type1.error) {
                container->iommu_data.type1.error = ret;
            }
        } else {
            hw_error("vfio: DMA mapping failed, unable to continue\n");
        }
    }
}

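/*
 * Alignment example (editor's illustration, assuming 4K target pages): a
 * RAM section covering [0x100000, 0x100000 + 0x20500) yields
 *     iova = TARGET_PAGE_ALIGN(0x100000)             = 0x100000
 *     end  = (0x100000 + 0x20500) & TARGET_PAGE_MASK = 0x120000
 * so the partial trailing page is left unmapped rather than handed to the
 * IOMMU misaligned.
 */
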
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.type1.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space +
           int128_get64(section->size)) & TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}

static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.type1.listener);
}

static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}

static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

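/*
 * Example decode (editor's illustration): an MSI Message Control value of
 * ctrl = 0x018a gives
 *     msi_64bit   = !!(0x018a & PCI_MSI_FLAGS_64BIT)   = true  (bit 7)
 *     msi_maskbit = !!(0x018a & PCI_MSI_FLAGS_MASKBIT) = true  (bit 8)
 *     entries     = 1 << ((0x018a & PCI_MSI_FLAGS_QMASK) >> 1) = 1 << 5 = 32
 * and a capability size of 0xa + 0xa + 0x4 = 0x18 bytes.
 */
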
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to set up MSI-X we need a
 * MemoryRegion for the BAR.  In order to set up the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}

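/*
 * Example decode (editor's illustration): with ctrl = 0x003f,
 * table = 0x00002003 and pba = 0x00003003, the device reports
 *     entries   = (0x003f & PCI_MSIX_FLAGS_QSIZE) + 1 = 64
 *     table_bar = 3, table_offset = 0x2000
 *     pba_bar   = 3, pba_offset   = 0x3000
 * i.e. a 64-entry vector table at BAR3+0x2000 with the PBA at BAR3+0x3000.
 */
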
static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    return 0;
}

static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}

static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}

static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));
    memory_region_destroy(&bar->mmap_mem);

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
        memory_region_destroy(&vdev->msix->mmap_mem);
    }

    memory_region_destroy(&bar->mem);
}

static int vfio_mmap_bar(VFIODevice *vdev, VFIOBAR *bar,
                         MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, OBJECT(vdev), name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, OBJECT(vdev), name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}

static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops,
                          bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & qemu_host_page_mask;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(vdev, bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = HOST_PAGE_ALIGN(vdev->msix->table_offset +
                                (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(vdev, bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow", name);
        }
    }

    vfio_bar_quirk_setup(vdev, nr);
}

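/*
 * Example (editor's illustration, assuming 4K host pages): for a 64KB BAR
 * holding a 64-entry MSI-X table at offset 0x2000:
 *     low window:  size  = 0x2000 & qemu_host_page_mask       = 0x2000
 *     high window: start = HOST_PAGE_ALIGN(0x2000 + 64 * 16)  = 0x3000
 *                  size  = 0x10000 - 0x3000                   = 0xd000
 * so [0, 0x2000) and [0x3000, 0x10000) are direct-mapped while the pages
 * containing the vector table stay on the slow read/write path.
 */
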
static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }

    if (vdev->has_vga) {
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);

        pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
        vfio_vga_quirk_setup(vdev);
    }
}

static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_teardown(vdev);
        pci_unregister_vga(&vdev->pdev);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
    }
}

static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

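/*
 * Example (editor's illustration): if the capability list links
 * 0x40 -> 0x60 -> 0x50 -> 0x00, then for pos = 0x50 the closest following
 * capability is next = 0x60, so the size returned is 0x60 - 0x50 = 0x10.
 * For the highest position in the chain, next stays 0xff and the size runs
 * to the end of standard config space.
 */
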
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

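/*
 * Editor's note with a concrete instance: vfio_setup_pcie_cap() below uses
 * these helpers to retype a Root Complex Integrated Endpoint:
 *
 *     vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
 *                            PCI_EXP_TYPE_ENDPOINT << 4, PCI_EXP_FLAGS_TYPE);
 *
 * After the call, the PCI_EXP_FLAGS_TYPE field in pdev.config reads as a
 * regular endpoint, and the same bits are flagged in emulated_config_bits,
 * so vfio_pci_read_config() serves them from QEMU instead of the device.
 */
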
static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus.  It doesn't make much
         * sense to even expose, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it.  We'll need
         * to revisit this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

static void vfio_check_pcie_flr(VFIODevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        DPRINTF("%04x:%02x:%02x.%x Supports FLR via PCIe cap\n",
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIODevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        DPRINTF("%04x:%02x:%02x.%x Supports PM reset\n",
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIODevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        DPRINTF("%04x:%02x:%02x.%x Supports FLR via AF cap\n",
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}

static void vfio_pci_pre_reset(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}

static void vfio_pci_post_reset(VFIODevice *vdev)
{
    vfio_enable_intx(vdev);
}

static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
                                PCIHostDeviceAddress *host2)
{
    return (host1->domain == host2->domain && host1->bus == host2->bus &&
            host1->slot == host2->slot && host1->function == host2->function);
}

static int vfio_pci_hot_reset(VFIODevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    DPRINTF("%s(%04x:%02x:%02x.%x) %s\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            single ? "one" : "multi");

    vfio_pci_pre_reset(vdev);
    vdev->needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
                         "no available reset mechanism.", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    DPRINTF("%04x:%02x:%02x.%x: hot reset dependent devices:\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIODevice *tmp;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        DPRINTF("\t%04x:%02x:%02x.%x group %d\n", host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, &vdev->host)) {
            continue;
        }

        QLIST_FOREACH(group, &group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
                             "depends on group %d which is not owned.",
                             vdev->host.domain, vdev->host.bus,
                             vdev->host.slot, vdev->host.function,
                             devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(tmp, &group->device_list, next) {
            if (vfio_pci_host_match(&host, &tmp->host)) {
                if (single) {
                    DPRINTF("vfio: found another in-use device "
                            "%04x:%02x:%02x.%x\n", host.domain, host.bus,
                            host.slot, host.function);
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        DPRINTF("vfio: No other in-use devices for multi hot reset\n");
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    DPRINTF("%04x:%02x:%02x.%x hot reset: %s\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIODevice *tmp;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, &vdev->host)) {
            continue;
        }

        QLIST_FOREACH(group, &group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(tmp, &group->device_list, next) {
            if (vfio_pci_host_match(&host, &tmp->host)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    vfio_pci_post_reset(vdev);
    g_free(info);

    return ret;
}

/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use device case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIODevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vdev)
{
    return vfio_pci_hot_reset(vdev, false);
}

static void vfio_pci_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vdev;

    QLIST_FOREACH(group, &group_list, next) {
        QLIST_FOREACH(vdev, &group->device_list, next) {
            if (!vdev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
                vdev->needs_reset = true;
            }
        }
    }

    QLIST_FOREACH(group, &group_list, next) {
        QLIST_FOREACH(vdev, &group->device_list, next) {
            if (vdev->needs_reset) {
                vfio_pci_hot_reset_multi(vdev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            DPRINTF("KVM_CREATE_DEVICE: %m\n");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

*group
)
3263 struct kvm_device_attr attr
= {
3264 .group
= KVM_DEV_VFIO_GROUP
,
3265 .attr
= KVM_DEV_VFIO_GROUP_DEL
,
3266 .addr
= (uint64_t)(unsigned long)&group
->fd
,
3269 if (vfio_kvm_device_fd
< 0) {
3273 if (ioctl(vfio_kvm_device_fd
, KVM_SET_DEVICE_ATTR
, &attr
)) {
3274 error_report("Failed to remove group %d to KVM VFIO device: %m",
3280 static int vfio_connect_container(VFIOGroup
*group
)
3282 VFIOContainer
*container
;
3285 if (group
->container
) {
3289 QLIST_FOREACH(container
, &container_list
, next
) {
3290 if (!ioctl(group
->fd
, VFIO_GROUP_SET_CONTAINER
, &container
->fd
)) {
3291 group
->container
= container
;
3292 QLIST_INSERT_HEAD(&container
->group_list
, group
, container_next
);
3297 fd
= qemu_open("/dev/vfio/vfio", O_RDWR
);
3299 error_report("vfio: failed to open /dev/vfio/vfio: %m");
3303 ret
= ioctl(fd
, VFIO_GET_API_VERSION
);
3304 if (ret
!= VFIO_API_VERSION
) {
3305 error_report("vfio: supported vfio version: %d, "
3306 "reported version: %d", VFIO_API_VERSION
, ret
);
3311 container
= g_malloc0(sizeof(*container
));
3314 if (ioctl(fd
, VFIO_CHECK_EXTENSION
, VFIO_TYPE1_IOMMU
)) {
3315 ret
= ioctl(group
->fd
, VFIO_GROUP_SET_CONTAINER
, &fd
);
3317 error_report("vfio: failed to set group container: %m");
3323 ret
= ioctl(fd
, VFIO_SET_IOMMU
, VFIO_TYPE1_IOMMU
);
3325 error_report("vfio: failed to set iommu for container: %m");
3331 container
->iommu_data
.type1
.listener
= vfio_memory_listener
;
3332 container
->iommu_data
.release
= vfio_listener_release
;
3334 memory_listener_register(&container
->iommu_data
.type1
.listener
,
3335 &address_space_memory
);
3337 if (container
->iommu_data
.type1
.error
) {
3338 ret
= container
->iommu_data
.type1
.error
;
3339 vfio_listener_release(container
);
3342 error_report("vfio: memory listener initialization failed for container\n");
3346 container
->iommu_data
.type1
.initialized
= true;
3349 error_report("vfio: No available IOMMU models");
3355 QLIST_INIT(&container
->group_list
);
3356 QLIST_INSERT_HEAD(&container_list
, container
, next
);
3358 group
->container
= container
;
3359 QLIST_INSERT_HEAD(&container
->group_list
, group
, container_next
);
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        DPRINTF("vfio_disconnect_container: close container->fd\n");
        close(container->fd);
        g_free(container);
    }
}

static VFIOGroup *vfio_get_group(int groupid)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            return group;
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        g_free(group);
        return NULL;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (QLIST_EMPTY(&group_list)) {
        qemu_register_reset(vfio_pci_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;
}

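/*
 * The kernel-facing calling sequence, end to end (editor's sketch following
 * the kernel's Documentation/vfio.txt; error handling elided, group 26 and
 * the device address are hypothetical):
 *
 *     int container = open("/dev/vfio/vfio", O_RDWR);
 *     int group = open("/dev/vfio/26", O_RDWR);
 *     ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *     ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *     int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *
 * vfio_get_group()/vfio_connect_container() above implement the first four
 * steps with caching; vfio_get_device() below performs the last.
 */
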
static void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    DPRINTF("vfio_put_group: close group->fd\n");
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&group_list)) {
        qemu_unregister_reset(vfio_pci_reset_handler, NULL);
    }
}

static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);

    if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     dev_info.num_regions);
        goto error;
    }

    if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
        QLIST_INIT(&vdev->bars[i].quirks);
    }

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        struct vfio_region_info vga_info = {
            .argsz = sizeof(vga_info),
            .index = VFIO_PCI_VGA_REGION_INDEX,
        };

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            vga_info.size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)vga_info.flags,
                         (unsigned long)vga_info.size);
            goto error;
        }

        vdev->vga.fd_offset = vga_info.offset;
        vdev->vga.fd = vdev->fd;

        vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);

        vdev->has_vga = true;
    }
    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        DPRINTF("VFIO_DEVICE_GET_IRQ_INFO failure: %m\n");
        ret = 0;
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        error_report("vfio: %04x:%02x:%02x.%x "
                     "Could not enable error recovery for the device",
                     vdev->host.domain, vdev->host.bus, vdev->host.slot,
                     vdev->host.function);
    }

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }
    return ret;
}

*vdev
)
3620 QLIST_REMOVE(vdev
, next
);
3622 DPRINTF("vfio_put_device: close vdev->fd\n");
static void vfio_err_notifier_handler(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. "
                 "Please collect any data possible and then kill the guest",
                 __func__, vdev->host.domain, vdev->host.bus,
                 vdev->host.slot, vdev->host.function);

    vm_stop(RUN_STATE_IO_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIODevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
    g_free(irq_set);
}

static void vfio_unregister_err_notifier(VFIODevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, sizeof(path));
    if (len <= 0 || len >= sizeof(path)) {
        error_report("vfio: error no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);
    vfio_register_err_notifier(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}

*pdev
)
3891 VFIODevice
*vdev
= DO_UPCAST(VFIODevice
, pdev
, pdev
);
3892 VFIOGroup
*group
= vdev
->group
;
3894 vfio_unregister_err_notifier(vdev
);
3895 pci_device_set_intx_routing_notifier(&vdev
->pdev
, NULL
);
3896 vfio_disable_interrupts(vdev
);
3897 if (vdev
->intx
.mmap_timer
) {
3898 timer_free(vdev
->intx
.mmap_timer
);
3900 vfio_teardown_msi(vdev
);
3901 vfio_unmap_bars(vdev
);
3902 g_free(vdev
->emulated_config_bits
);
3904 vfio_put_device(vdev
);
3905 vfio_put_group(group
);
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_pci_pre_reset(vdev);

    if (vdev->reset_works && (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
        DPRINTF("%04x:%02x:%02x.%x FLR/VFIO_DEVICE_RESET\n", vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
        DPRINTF("%04x:%02x:%02x.%x PCI PM Reset\n", vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIODevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd, VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

= {
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}

= {
3981 .parent
= TYPE_PCI_DEVICE
,
3982 .instance_size
= sizeof(VFIODevice
),
3983 .class_init
= vfio_pci_dev_class_init
,
static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)