/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/vfio.h"
/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1
#define VFIO_ALLOW_KVM_MSI 1
#define VFIO_ALLOW_KVM_MSIX 1
typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;
typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;
typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;
typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;
typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;
typedef struct VFIOMSIVector {
    /*
     * Two interrupt paths are configured per vector.  The first is only used
     * for interrupts injected via QEMU.  This is typically the non-accel path,
     * but may also be used when we want QEMU to handle masking and pending
     * bits.  The KVM path bypasses QEMU and is therefore higher performance,
     * but requires masking at the device.  virq is used to track the MSI route
     * through KVM, thus kvm_interrupt is only available when virq is set to a
     * valid (>= 0) value.
     */
    EventNotifier interrupt;
    EventNotifier kvm_interrupt;
    struct VFIODevice *vdev; /* back pointer to device */
    int virq;
    bool use;
} VFIOMSIVector;
enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};

typedef struct VFIOAddressSpace {
    AddressSpace *as;
    QLIST_HEAD(, VFIOContainer) containers;
    QLIST_ENTRY(VFIOAddressSpace) list;
} VFIOAddressSpace;

static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);
struct VFIOGroup;

typedef struct VFIOType1 {
    MemoryListener listener;
    int error;
    bool initialized;
} VFIOType1;
typedef struct VFIOContainer {
    VFIOAddressSpace *space;
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            VFIOType1 type1;
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
typedef struct VFIOGuestIOMMU {
    VFIOContainer *container;
    MemoryRegion *iommu;
    Notifier n;
    QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
} VFIOGuestIOMMU;
/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;
typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    EventNotifier err_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool reset_works;
    bool has_vga;
    bool pci_aer;
    bool has_flr;
    bool has_pm_reset;
    bool rom_read_failed;
} VFIODevice;
typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;
typedef struct VFIORomBlacklistEntry {
    uint16_t vendor_id;
    uint16_t device_id;
} VFIORomBlacklistEntry;

/*
 * List of device ids/vendor ids for which to disable
 * option rom loading. This avoids guest hangs during rom
 * execution, as noticed with the BCM 57810 card, for lack of a
 * better way to handle such issues.
 * The user can still override by specifying a romfile or
 * setting rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang. When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue.
 */
static const VFIORomBlacklistEntry romblacklist[] = {
    /* Broadcom BCM 57810 */
    { 0x14e4, 0x168e }
};
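/*
 * For example (hypothetical host address and rom path), either of the
 * following command line options overrides the blacklist:
 *   -device vfio-pci,host=0000:05:00.0,romfile=/path/to/card.rom
 *   -device vfio-pci,host=0000:05:00.0,rombar=1
 */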
#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);
#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
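/*
 * A sketch of how that fd gets created on first use elsewhere in this
 * file (assuming kernel support for the VFIO KVM pseudo device):
 *
 *   struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *
 *   if (!kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
 *       vfio_kvm_device_fd = cd.fd;
 *   }
 */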
static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
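/*
 * For comparison, the eventfd-carrying form of the same ioctl is variable
 * length; a minimal sketch of the pattern used throughout this file:
 *
 *   int argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
 *   struct vfio_irq_set *set = g_malloc0(argsz);
 *
 *   set->argsz = argsz;
 *   set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *   set->index = index;
 *   set->start = 0;
 *   set->count = 1;
 *   *(int32_t *)&set->data = event_notifier_get_fd(notifier);
 *
 *   ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, set);
 *   g_free(set);
 */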
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
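/*
 * For example (hypothetical host address), a device that should never have
 * the timer re-enable mmaps can be configured with:
 *   -device vfio-pci,host=0000:05:00.0,x-intx-mmap-timeout-ms=0
 */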
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_intx(vdev);
}
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}
static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}
static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}
static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

#ifdef DEBUG_VFIO
    MSIMessage msg;

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msg = msix_get_message(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msg = msi_get_message(&vdev->pdev, nr);
    } else {
        abort();
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d 0x%"PRIx64"/0x%x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr, msg.address, msg.data);
#endif

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int32_t fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
static void vfio_add_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage *msg,
                                  bool msix)
{
    int virq;

    if ((msix && !VFIO_ALLOW_KVM_MSIX) ||
        (!msix && !VFIO_ALLOW_KVM_MSI) || !msg) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, *msg);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
                                       NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}
static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
                                      vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}
static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
}
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg);
        }
    } else {
        vfio_add_kvm_msi_virq(vector, msg, true);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}
*pdev
,
848 unsigned int nr
, MSIMessage msg
)
850 return vfio_msix_vector_do_use(pdev
, nr
, &msg
, vfio_msi_interrupt
);
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}
static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg = msi_get_message(&vdev->pdev, i);

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vector, &msg, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}
static void vfio_disable_msi_common(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}
static void vfio_disable_msix(VFIODevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_disable_msi(VFIODevice *vdev)
{
    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_update_msi(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg);
    }
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}
static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}
static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_pci_load_rom(VFIODevice *vdev)
{
    struct vfio_region_info reg_info = {
        .argsz = sizeof(reg_info),
        .index = VFIO_PCI_ROM_REGION_INDEX
    };
    uint64_t size;
    off_t off = 0;
    size_t bytes;

    if (ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    DPRINTF("Device %04x:%02x:%02x.%x ROM:\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%04x:%02x:%02x.%x",
                     vdev->host.domain, vdev->host.bus, vdev->host.slot,
                     vdev->host.function);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, vdev->rom + off, size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }
}
static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIODevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x, 0x%"HWADDR_PRIx", 0x%x) = 0x%"PRIx64"\n",
            __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, size, data);

    return data;
}
static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static bool vfio_blacklist_opt_rom(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t vendor_id, device_id;
    int count = 0;

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);

    while (count < ARRAY_SIZE(romblacklist)) {
        if (romblacklist[count].vendor_id == vendor_id &&
            romblacklist[count].device_id == device_id) {
            return true;
        }
        count++;
    }

    return false;
}
static void vfio_pci_size_rom(VFIODevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char name[32];

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning : Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(vdev->fd, &orig, 4, offset) != 4 ||
        pwrite(vdev->fd, &size, 4, offset) != 4 ||
        pread(vdev->fd, &size, 4, offset) != 4 ||
        pwrite(vdev->fd, &orig, 4, offset) != 4) {
        error_report("%s(%04x:%02x:%02x.%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning : Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified non zero value for "
                         "rombar\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        } else {
            error_printf("Warning : Rom loading for device at "
                         "%04x:%02x:%02x.%x has been disabled due to "
                         "system instability issues. "
                         "Specify rombar=1 or romfile to force\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
            return;
        }
    }

    DPRINTF("%04x:%02x:%02x.%x ROM size 0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, size);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}
static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}
static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}
static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2)
{
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}
static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar],
                             addr + quirk->data.base_offset, size);
    }

    return data;
}
static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr, data, size);

        return;
    }

    vfio_bar_write(&vdev->bars[quirk->data.bar],
                   addr + quirk->data.base_offset, data, size);
}
static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr + base, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar], addr + base, size);
    }

    return data;
}
static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr + base, data, size);
    } else {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}
static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
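/*
 * Illustration (hypothetical values): with BAR4 emulated at I/O address
 * 0xe000, a guest inb(0x3c3) traps here and returns 0xe0, the second byte
 * of the virtual BAR4 register, regardless of what the physical 0x3c3
 * register would have returned.
 */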
static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 BAR4 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access is provided, but we drop writes when the window
 * is enabled to config space nonetheless.
 */
static void vfio_probe_ati_bar4_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.base_offset, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR4 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
#define PCI_VENDOR_ID_REALTEK 0x10ec

/*
 * RTL8168 devices have a backdoor that can access the MSI-X table.  At BAR2
 * offset 0x70 there is a dword data register, offset 0x74 is a dword address
 * register.  According to the Linux r8169 driver, the MSI-X table is addressed
 * when the "type" portion of the address register is set to 0x1.  This appears
 * to be bits 16:30.  Bit 31 is both a write indicator and some sort of
 * "address latched" indicator.  Bits 12:15 are a mask field, which we can
 * ignore because the MSI-X table should always be accessed as a dword (full
 * mask).  Bits 0:11 are the offset within the type.
 *
 * Example trace:
 *
 * Read from MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
 *
 * Write 0xfee00000 to MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
 */
static uint64_t vfio_rtl8168_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    switch (addr) {
    case 4: /* address */
        if (quirk->data.flags) {
            DPRINTF("%s fake read(%04x:%02x:%02x.%d)\n",
                    memory_region_name(&quirk->mem), vdev->host.domain,
                    vdev->host.bus, vdev->host.slot, vdev->host.function);

            return quirk->data.address_match ^ 0x10000000U;
        }
        break;
    case 0: /* data */
        if (quirk->data.flags) {
            uint64_t val;

            DPRINTF("%s MSI-X table read(%04x:%02x:%02x.%d)\n",
                    memory_region_name(&quirk->mem), vdev->host.domain,
                    vdev->host.bus, vdev->host.slot, vdev->host.function);

            if (!(vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
                return 0;
            }

            io_mem_read(&vdev->pdev.msix_table_mmio,
                        (hwaddr)(quirk->data.address_match & 0xfff),
                        &val, size);
            return val;
        }
    }

    DPRINTF("%s direct read(%04x:%02x:%02x.%d)\n",
            memory_region_name(&quirk->mem), vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return vfio_bar_read(&vdev->bars[quirk->data.bar], addr + 0x70, size);
}
static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    switch (addr) {
    case 4: /* address */
        if ((data & 0x7fff0000) == 0x10000) { /* MSI-X table */
            if (data & 0x10000000U &&
                vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {

                DPRINTF("%s MSI-X table write(%04x:%02x:%02x.%d)\n",
                        memory_region_name(&quirk->mem), vdev->host.domain,
                        vdev->host.bus, vdev->host.slot, vdev->host.function);

                io_mem_write(&vdev->pdev.msix_table_mmio,
                             (hwaddr)(quirk->data.address_match & 0xfff),
                             data, size);
            }

            quirk->data.flags = 1;
            quirk->data.address_match = data;

            return;
        }
        quirk->data.flags = 0;
        break;
    case 0: /* data */
        quirk->data.address_mask = data;
        break;
    }

    DPRINTF("%s direct write(%04x:%02x:%02x.%d)\n",
            memory_region_name(&quirk->mem), vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_bar_write(&vdev->bars[quirk->data.bar], addr + 0x70, data, size);
}
static const MemoryRegionOps vfio_rtl8168_window_quirk = {
    .read = vfio_rtl8168_window_quirk_read,
    .write = vfio_rtl8168_window_quirk_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_rtl8168_bar2_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_REALTEK ||
        pci_get_word(pdev->config + PCI_DEVICE_ID) != 0x8168 || nr != 2) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_rtl8168_window_quirk,
                          quirk, "vfio-rtl8168-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        0x70, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled RTL8168 BAR2 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR2 0x4000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de
/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
 */
enum {
    NV_3D0_NONE = 0,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};
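/*
 * Illustrative guest sequence for reading config space offset 0x4 through
 * this backdoor (hypothetical values, for documentation only):
 *
 *   outw(0x3d4, 0x338);     // enter window select state
 *   outl(0x3d0, 0x1804);    // 0x1800 mirror + offset 0x4
 *   outw(0x3d4, 0x538);     // arm for read
 *   val = inl(0x3d0);       // returns the config space dword
 */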
static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + quirk->data.base_offset, size);

    if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
        data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
        DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
    }

    quirk->data.flags = NV_3D0_NONE;

    return data;
}
static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data.flags) {
    case NV_3D0_NONE:
        if (addr == quirk->data.address_offset && data == 0x338) {
            quirk->data.flags = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset &&
            (data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags = NV_3D0_WINDOW;
            quirk->data.address_val = data & quirk->data.address_mask;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.address_offset) {
            if (data == 0x538) {
                quirk->data.flags = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data.flags = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset) {
            vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
            DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
            return;
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + quirk->data.base_offset, data, size);
}
static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.base_offset = 0x10;
    quirk->data.address_offset = 4;
    quirk->data.address_size = 2;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.data_offset = 0;
    quirk->data.data_size = 4;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          quirk, "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                quirk->data.base_offset, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * The second quirk is documented in envytools. The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs. The BAR we care about is
 * again BAR0. This backdoor is apparently a bit newer than the one above,
 * so we need to not only trap 256 bytes @0x1800, but all of PCI config
 * space, including extended space, which is available in the 4k window
 * @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};
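/*
 * The three flag bits accumulate independently: writes to the master and
 * enable registers set or clear NV_BAR5_MASTER and NV_BAR5_ENABLE, and a
 * recognized address write sets NV_BAR5_ADDRESS.  Only when all three are
 * set (NV_BAR5_VALID, used as read_flags/write_flags below) is the generic
 * window quirk expected to redirect the data port into config space;
 * otherwise accesses pass through to the hardware untouched.
 */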
static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;

    switch (addr) {
    case 0x0:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_MASTER;
        } else {
            quirk->data.flags &= ~NV_BAR5_MASTER;
        }
        break;
    case 0x4:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_ENABLE;
        } else {
            quirk->data.flags &= ~NV_BAR5_ENABLE;
        }
        break;
    case 0x8:
        if (quirk->data.flags & NV_BAR5_MASTER) {
            if ((data & ~0xfff) == 0x88000) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xfff;
            } else if ((data & ~0xff) == 0x1800) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xff;
            } else {
                quirk->data.flags &= ~NV_BAR5_ADDRESS;
            }
        }
        break;
    }

    vfio_generic_window_quirk_write(opaque, addr, data, size);
}
static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_nvidia_bar5_window_quirk_write,
    .valid.min_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
    quirk->data.address_offset = 0x8;
    quirk->data.address_size = 0; /* actually 4, but avoids generic code */
    quirk->data.data_offset = 0xc;
    quirk->data.data_size = 4;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_nvidia_bar5_window_quirk, quirk,
                          "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
static void vfio_nvidia_88000_quirk_write(void *opaque, hwaddr addr,
                                          uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;

    vfio_generic_quirk_write(opaque, addr, data, size);

    /*
     * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
     * MSI capability ID register.  Both the ID and next register are
     * read-only, so we allow writes covering either of those to real hw.
     * NB - only fixed for the 0x88000 MMIO window.
     */
    if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
        vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}
static const MemoryRegionOps vfio_nvidia_88000_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_nvidia_88000_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirk at a page granularity or else they don't seem to work when
 *      BARs are mmap'd
 *
 * Here's offset 0x88000...
 */
static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;
    uint16_t vendor, class;

    vendor = pci_get_word(pdev->config + PCI_VENDOR_ID);
    class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

    if (nr != 0 || vendor != PCI_VENDOR_ID_NVIDIA ||
        class != PCI_CLASS_DISPLAY_VGA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x88000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_88000_quirk,
                          quirk, "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match &
                                        TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * And here's the same for BAR0 offset 0x1800...
 */
static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    /* Log the chipset ID */
    DPRINTF("Nvidia NV%02x\n",
            (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff);

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk,
                          quirk, "vfio-nvidia-bar0-1800-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match &
                                        TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

/*
 * Common quirk probe entry points.
 */
static void vfio_vga_quirk_setup(VFIODevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}

static void vfio_vga_quirk_teardown(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
            object_unparent(OBJECT(&quirk->mem));
            QLIST_REMOVE(quirk, next);
            g_free(quirk);
        }
    }
}
static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr)
{
    vfio_probe_ati_bar4_window_quirk(vdev, nr);
    vfio_probe_ati_bar2_4000_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
    vfio_probe_rtl8168_bar2_window_quirk(vdev, nr);
}

static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
        memory_region_del_subregion(&bar->mem, &quirk->mem);
        object_unparent(OBJECT(&quirk->mem));
        QLIST_REMOVE(quirk, next);
        g_free(quirk);
    }
}
/*
 * PCI config space
 */
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}
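/*
 * Worked example of the merge above (values made up, the mask handling is
 * exactly the expression used): assume a 2-byte read where only the low
 * byte is emulated, so emu_bits = 0x00ff.  With emu_val = 0xaa55 and
 * phys_val = 0x1234:
 *
 *   val = (0xaa55 & 0x00ff) | (0x1234 & ~0x00ff)
 *       = 0x0055 | 0x1200 = 0x1255
 *
 * i.e. the guest sees QEMU's byte wherever a bit is emulated and the
 * physical device's byte everywhere else.
 */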
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_enable_msi(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_disable_msi(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}
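/*
 * Illustrative only: mapping 1MB of guest RAM at IOVA 0x100000 backed by
 * host virtual address 'ptr' boils down to this single container ioctl
 * (the struct fields are exactly those filled in above):
 *
 *   struct vfio_iommu_type1_dma_map map = {
 *       .argsz = sizeof(map),
 *       .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *       .vaddr = (__u64)(uintptr_t)ptr,
 *       .iova  = 0x100000,
 *       .size  = 0x100000,
 *   };
 *   ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map);
 *
 * The kernel pins the pages and programs the IOMMU; vfio_dma_unmap()
 * undoes it with VFIO_IOMMU_UNMAP_DMA over the same IOVA range.
 */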
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
static void vfio_iommu_map_notify(Notifier *n, void *data)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    IOMMUTLBEntry *iotlb = data;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    DPRINTF("iommu map @ %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iotlb->iova, iotlb->iova + iotlb->addr_mask);

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        DPRINTF("iommu map to non memory area %"HWADDR_PRIx"\n",
                xlat);
        return;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        DPRINTF("iommu has granularity incompatible with target AS\n");
        return;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;

        ret = vfio_dma_map(container, iotlb->iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
}
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.type1.listener);
    hwaddr iova, end;
    Int128 llend;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        DPRINTF("region_add [iommu] %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
                iova, int128_get64(int128_sub(llend, int128_one())));
        /*
         * FIXME: We should do some checking to see if the
         * capabilities of the host VFIO IOMMU are adequate to model
         * the guest IOMMU
         *
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        /*
         * This assumes that the guest IOMMU is empty of
         * mappings at this point.
         *
         * One way of doing this is:
         * 1. Avoid sharing IOMMUs between emulated devices or different
         *    IOMMU groups.
         * 2. Implement VFIO_IOMMU_ENABLE in the host kernel to fail if
         *    there are some mappings in IOMMU.
         *
         * VFIO on SPAPR does that.  Other IOMMU models may do that
         * differently; they must make sure there are no existing mappings
         * or loop through existing mappings to map them into VFIO.
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    end = int128_get64(llend);
    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("region_add [ram] %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);

        /*
         * On the initfn path, store the first error in the container so we
         * can gracefully fail.  Runtime, there's not much we can do other
         * than throw a hardware error.
         */
        if (!container->iommu_data.type1.initialized) {
            if (!container->iommu_data.type1.error) {
                container->iommu_data.type1.error = ret;
            }
        } else {
            hw_error("vfio: DMA mapping failed, unable to continue");
        }
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.type1.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(&giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}
static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.type1.listener);
}
/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}
static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
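/*
 * Example of the sizing above: a function with 64-bit addressing and
 * per-vector masking carries a 0xa + 0xa + 0x4 = 0x18 (24) byte MSI
 * capability, while a minimal 32-bit, non-masking function is just 0xa
 * bytes.  This length is what the config write handler later uses to
 * detect accesses overlapping the emulated MSI registers.
 */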
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to set up MSI-X we need a
 * MemoryRegion for the BAR.  In order to set up the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}
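/*
 * For example (register values hypothetical): a PCI_MSIX_TABLE readback
 * of 0x2003 decodes through the masks above to table_bar = 3 and
 * table_offset = 0x2000, and a control value of 0x003f yields
 * entries = 64.  Those numbers are all that vfio_map_bar() needs in order
 * to punch the non-mmap'able table pages out of the BAR mapping before
 * msix_init() runs.
 */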
static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    return 0;
}
static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}
/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}
static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }
}
static int vfio_mmap_bar(VFIODevice *vdev, VFIOBAR *bar,
                         MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, OBJECT(vdev), name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, OBJECT(vdev), name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}
static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops,
                          bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & qemu_host_page_mask;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(vdev, bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = HOST_PAGE_ALIGN(vdev->msix->table_offset +
                                (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(vdev, bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow", name);
        }
    }

    vfio_bar_quirk_setup(vdev, nr);
}
static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }

    if (vdev->has_vga) {
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);

        pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
        vfio_vga_quirk_setup(vdev);
    }
}
static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_teardown(vdev);
        pci_unregister_vga(&vdev->pdev);
    }
}
/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
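/*
 * Worked example: with capabilities chained at 0x40, 0x60 and 0x50,
 * vfio_std_cap_max_size(pdev, 0x50) walks the whole list, finds 0x60 as
 * the smallest position above 0x50, and returns 0x10 as the size budget
 * for the capability at 0x50.  For the last capability in the chain,
 * 'next' stays 0xff and the budget runs to the end of standard config
 * space.
 */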
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus.  It doesn't make much
         * sense to even expose, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it.  We'll need
         * to revisit this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2
         * fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }
        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }
    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}
static void vfio_check_pcie_flr(VFIODevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        DPRINTF("%04x:%02x:%02x.%x Supports FLR via PCIe cap\n",
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIODevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        DPRINTF("%04x:%02x:%02x.%x Supports PM reset\n",
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIODevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        DPRINTF("%04x:%02x:%02x.%x Supports FLR via AF cap\n",
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function);
        vdev->has_flr = true;
    }
}
static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}
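/*
 * Recursion example: for a physical chain 0x40 -> 0x50 -> 0x60, the calls
 * nest as
 *
 *   vfio_add_std_cap(vdev, 0x40)
 *     vfio_add_std_cap(vdev, 0x50)
 *       vfio_add_std_cap(vdev, 0x60)   <- list head reset happens here
 *
 * and pci_add_capability() then runs on the unwind: 0x60 first, then
 * 0x50, then 0x40, so the emulated chain ends up in the same order as
 * the physical one.
 */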
static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}
static void vfio_pci_pre_reset(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
static void vfio_pci_post_reset(VFIODevice *vdev)
{
    vfio_enable_intx(vdev);
}

static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
                                PCIHostDeviceAddress *host2)
{
    return (host1->domain == host2->domain && host1->bus == host2->bus &&
            host1->slot == host2->slot && host1->function == host2->function);
}
static int vfio_pci_hot_reset(VFIODevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    DPRINTF("%s(%04x:%02x:%02x.%x) %s\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            single ? "one" : "multi");

    vfio_pci_pre_reset(vdev);
    vdev->needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
                         "no available reset mechanism.", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    DPRINTF("%04x:%02x:%02x.%x: hot reset dependent devices:\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIODevice *tmp;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        DPRINTF("\t%04x:%02x:%02x.%x group %d\n", host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        /* Skip the current device */
        if (vfio_pci_host_match(&host, &vdev->host)) {
            continue;
        }

        QLIST_FOREACH(group, &group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
                             "depends on group %d which is not owned.",
                             vdev->host.domain, vdev->host.bus,
                             vdev->host.slot, vdev->host.function,
                             devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(tmp, &group->device_list, next) {
            if (vfio_pci_host_match(&host, &tmp->host)) {
                if (single) {
                    DPRINTF("vfio: found another in-use device "
                            "%04x:%02x:%02x.%x\n", host.domain, host.bus,
                            host.slot, host.function);
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        DPRINTF("vfio: No other in-use devices for multi hot reset\n");
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    DPRINTF("%04x:%02x:%02x.%x hot reset: %s\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIODevice *tmp;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, &vdev->host)) {
            continue;
        }

        QLIST_FOREACH(group, &group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(tmp, &group->device_list, next) {
            if (vfio_pci_host_match(&host, &tmp->host)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    vfio_pci_post_reset(vdev);
    g_free(info);

    return ret;
}
/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use device case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIODevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vdev)
{
    return vfio_pci_hot_reset(vdev, false);
}
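/*
 * Concrete case: two assigned functions of one dual-port NIC behind a
 * bridge.  Unplugging one function calls _one(), which is expected to bail
 * out when it finds the sibling still in use, leaving the sibling
 * untouched.  On system reset, _multi() performs a single bus reset
 * covering both functions, and the needs_reset marker keeps the second
 * device from triggering a redundant second reset.
 */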
static void vfio_pci_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vdev;

    QLIST_FOREACH(group, &group_list, next) {
        QLIST_FOREACH(vdev, &group->device_list, next) {
            if (!vdev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
                vdev->needs_reset = true;
            }
        }
    }

    QLIST_FOREACH(group, &group_list, next) {
        QLIST_FOREACH(vdev, &group->device_list, next) {
            if (vdev->needs_reset) {
                vfio_pci_hot_reset_multi(vdev);
            }
        }
    }
}
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            DPRINTF("KVM_CREATE_DEVICE: %m\n");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
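/*
 * Shape of the API, for reference: KVM_CREATE_DEVICE with KVM_DEV_TYPE_VFIO
 * yields one per-VM device fd, and each group fd is then attached or
 * detached through KVM_SET_DEVICE_ATTR, e.g.
 *
 *   struct kvm_device_attr attr = {
 *       .group = KVM_DEV_VFIO_GROUP,
 *       .attr  = KVM_DEV_VFIO_GROUP_ADD,
 *       .addr  = (uint64_t)(unsigned long)&group->fd,
 *   };
 *   ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * This lets KVM learn about assigned devices doing non-coherent DMA so it
 * can, among other things, honor guest WBINVD.
 */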
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;

    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_data.type1.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.type1.listener,
                                 &address_space_memory);

        if (container->iommu_data.type1.error) {
            ret = container->iommu_data.type1.error;
            error_report("vfio: memory listener initialization failed for container");
            goto listener_release_exit;
        }

        container->iommu_data.type1.initialized = true;

    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_report("vfio: failed to enable container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_data.type1.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.type1.listener,
                                 container->space->as);

    } else {
        error_report("vfio: No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;

listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
*group
)
3780 VFIOContainer
*container
= group
->container
;
3782 if (ioctl(group
->fd
, VFIO_GROUP_UNSET_CONTAINER
, &container
->fd
)) {
3783 error_report("vfio: error disconnecting group %d from container",
3787 QLIST_REMOVE(group
, container_next
);
3788 group
->container
= NULL
;
3790 if (QLIST_EMPTY(&container
->group_list
)) {
3791 VFIOAddressSpace
*space
= container
->space
;
3793 if (container
->iommu_data
.release
) {
3794 container
->iommu_data
.release(container
);
3796 QLIST_REMOVE(container
, next
);
3797 DPRINTF("vfio_disconnect_container: close container->fd\n");
3798 close(container
->fd
);
3801 vfio_put_address_space(space
);
static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_report("vfio: group %d used in multiple address spaces",
                             group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&group_list)) {
        qemu_register_reset(vfio_pci_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
*group
)
3874 if (!QLIST_EMPTY(&group
->device_list
)) {
3878 vfio_kvm_device_del_group(group
);
3879 vfio_disconnect_container(group
);
3880 QLIST_REMOVE(group
, next
);
3881 DPRINTF("vfio_put_group: close group->fd\n");
3885 if (QLIST_EMPTY(&group_list
)) {
3886 qemu_unregister_reset(vfio_pci_reset_handler
, NULL
);
static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);

    if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     dev_info.num_regions);
        goto error;
    }

    if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
        QLIST_INIT(&vdev->bars[i].quirks);
    }

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        struct vfio_region_info vga_info = {
            .argsz = sizeof(vga_info),
            .index = VFIO_PCI_VGA_REGION_INDEX,
        };

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            vga_info.size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)vga_info.flags,
                         (unsigned long)vga_info.size);
            goto error;
        }

        vdev->vga.fd_offset = vga_info.offset;
        vdev->vga.fd = vdev->fd;

        vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);

        vdev->has_vga = true;
    }
    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        DPRINTF("VFIO_DEVICE_GET_IRQ_INFO failure: %m\n");
        ret = 0;
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        error_report("vfio: %04x:%02x:%02x.%x "
                     "Could not enable error recovery for the device",
                     vdev->host.domain, vdev->host.bus, vdev->host.slot,
                     vdev->host.function);
    }

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }

    return ret;
}
static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
}
static void vfio_err_notifier_handler(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken.  One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error.  This requires that PCIe capabilities be
     * exposed to the guest.  For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected.  "
                 "Please collect any data possible and then kill the guest",
                 __func__, vdev->host.domain, vdev->host.bus,
                 vdev->host.slot, vdev->host.function);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}
/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIODevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
    g_free(irq_set);
}
static void vfio_unregister_err_notifier(VFIODevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}
static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, sizeof(iommu_group_path));
    if (len <= 0 || len >= sizeof(iommu_group_path)) {
        error_report("vfio: error no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info. If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);
    vfio_register_err_notifier(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}
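
/*
 * The sysfs walk at the top of vfio_initfn() is the canonical way for
 * userspace to find a device's VFIO group. A minimal standalone sketch
 * (not compiled into this file; the function name is illustrative and
 * error handling is elided):
 */
#if 0
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

/* Map "dddd:bb:ss.f" to its IOMMU group number, or -1 on failure */
static int pci_addr_to_iommu_group(const char *bdf)
{
    char path[PATH_MAX], link[PATH_MAX];
    ssize_t len;
    int groupid;

    snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/iommu_group", bdf);
    len = readlink(path, link, sizeof(link) - 1);
    if (len < 0) {
        return -1;                  /* no IOMMU group: not assignable */
    }
    link[len] = 0;

    /* the symlink target ends in ".../iommu_groups/<n>" */
    if (sscanf(basename(link), "%d", &groupid) != 1) {
        return -1;
    }
    return groupid;                 /* group chardev is /dev/vfio/<n> */
}
#endif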
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    vfio_put_device(vdev);
    vfio_put_group(group);
}
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_pci_pre_reset(vdev);

    /* Prefer a function-level reset (FLR) when the device offers one */
    if (vdev->reset_works && (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
        DPRINTF("%04x:%02x:%02x.%x FLR/VFIO_DEVICE_RESET\n", vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
        DPRINTF("%04x:%02x:%02x.%x PCI PM Reset\n", vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}
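
/*
 * vdev->reset_works above caches what the kernel reports at device-open
 * time. A minimal sketch of the underlying query (not compiled into this
 * file; the function name is illustrative):
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int vfio_device_reset_works(int device_fd)
{
    struct vfio_device_info info;

    memset(&info, 0, sizeof(info));
    info.argsz = sizeof(info);
    if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info)) {
        return 0;
    }
    /* set when the VFIO_DEVICE_RESET ioctl is expected to work */
    return !!(info.flags & VFIO_DEVICE_FLAGS_RESET);
}
#endif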
static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, PCI_DEVICE(obj));

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
}
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIODevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
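
/*
 * These properties surface on the QEMU command line, e.g. (the host
 * address below is illustrative):
 *
 *   -device vfio-pci,host=0000:06:0d.0,x-vga=on,x-intx-mmap-timeout-ms=1100
 *
 * "host" names the physical device as domain:bus:slot.function; the
 * experimental "x-" options tune VGA quirk support and how long mmap
 * acceleration stays disabled after an INTx interrupt.
 */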
static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}
static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
};
static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)
static int vfio_container_do_ioctl(AddressSpace *as, int32_t groupid,
                                   int req, void *param)
{
    VFIOGroup *group;
    VFIOContainer *container;
    int ret = -1;

    group = vfio_get_group(groupid, as);
    if (!group) {
        error_report("vfio: group %d not registered", groupid);
        return ret;
    }

    container = group->container;
    if (container) {
        ret = ioctl(container->fd, req, param);
        if (ret < 0) {
            error_report("vfio: failed to ioctl container: ret=%d, %s",
                         ret, strerror(errno));
        }
    }

    vfio_put_group(group);

    return ret;
}
int vfio_container_ioctl(AddressSpace *as, int32_t groupid,
                         int req, void *param)
{
    /* We allow only certain ioctls to the container */
    switch (req) {
    case VFIO_CHECK_EXTENSION:
    case VFIO_IOMMU_SPAPR_TCE_GET_INFO:
        break;
    default:
        /* Return an error on unknown requests */
        error_report("vfio: unsupported ioctl %X", req);
        return -1;
    }

    return vfio_container_do_ioctl(as, groupid, req, param);
}
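
/*
 * A minimal sketch of a caller on the whitelist above: sPAPR code querying
 * the container for its TCE (DMA window) parameters (not compiled into this
 * file; the function name is illustrative):
 */
#if 0
static void example_query_spapr_tce(AddressSpace *as, int32_t groupid)
{
    struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };

    if (vfio_container_ioctl(as, groupid,
                             VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info) == 0) {
        /* info.dma32_window_start and info.dma32_window_size are valid */
    }
}
#endif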