/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "hw/vfio/vfio.h"
#include "hw/vfio/vfio-common.h"
/* Trap acceleration paths for more logging/debugging */
#define VFIO_ALLOW_KVM_INTX 1
#define VFIO_ALLOW_KVM_MSI  1
#define VFIO_ALLOW_KVM_MSIX 1

typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIOPCIDevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;
        uint32_t address_match;
        uint32_t address_mask;
        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;
        uint32_t flags:3;
        uint32_t read_flags:3;
        uint32_t write_flags:3;
    } data;
} VFIOQuirk;
typedef struct VFIOBAR {
    VFIORegion region;
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;
typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;
typedef struct VFIOMSIVector {
    /*
     * Two interrupt paths are configured per vector.  The first is used only
     * for interrupts injected via QEMU.  This is typically the non-accel path,
     * but may also be used when we want QEMU to handle masking and pending
     * bits.  The KVM path bypasses QEMU and is therefore higher performance,
     * but requires masking at the device.  virq is used to track the MSI route
     * through KVM, thus kvm_interrupt is only available when virq is set to a
     * valid (>= 0) value.
     */
    EventNotifier interrupt;
    EventNotifier kvm_interrupt;
    struct VFIOPCIDevice *vdev; /* back pointer to device */
    int virq;
    bool use;
} VFIOMSIVector;
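/*
 * Illustrative sketch (not driver code) of the two signaling paths per
 * vector described above, bracketed names are the VFIOMSIVector fields
 * involved:
 *
 *   device -> vfio (kernel) -> [interrupt] eventfd     -> QEMU handler
 *                                                      -> msi(x)_notify()
 *   device -> vfio (kernel) -> [kvm_interrupt] eventfd -> KVM irqfd (virq)
 *                                                      -> guest
 *
 * The QEMU path honors emulated mask/pending bits; the KVM path trades
 * that control for lower latency.
 */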
enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;
typedef struct VFIOPCIDevice {
    PCIDevice pdev;
    VFIODevice vbasedev;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    EventNotifier err_notifier;
    EventNotifier req_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
#define VFIO_FEATURE_ENABLE_REQ_BIT 1
#define VFIO_FEATURE_ENABLE_REQ (1 << VFIO_FEATURE_ENABLE_REQ_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool has_vga;
    bool pci_aer;
    bool req_enabled;
    bool has_flr;
    bool has_pm_reset;
    bool rom_read_failed;
} VFIOPCIDevice;
typedef struct VFIORomBlacklistEntry {
    uint16_t vendor_id;
    uint16_t device_id;
} VFIORomBlacklistEntry;

/*
 * List of device ids/vendor ids for which to disable
 * option rom loading. This avoids the guest hangs during rom
 * execution as noticed with the BCM 57810 card for lack of a
 * better way to handle such issues.
 * The user can still override by specifying a romfile or
 * rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang. When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue
 */
static const VFIORomBlacklistEntry romblacklist[] = {
    /* Broadcom BCM 57810 */
    { 0x14e4, 0x168e }
};
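/*
 * Adding an entry is a one-line change; for example, a hypothetical device
 * 0xabcd from vendor 0x1234 would be blacklisted with:
 *
 *   { 0x1234, 0xabcd }
 *
 * (Placeholder IDs; take real values from lspci -nn and document the card
 * in the bug referenced above.)
 */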
#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
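/*
 * For example (hypothetical host address), the timer can be disabled
 * entirely from the command line with:
 *
 *   -device vfio-pci,host=0000:01:00.0,x-intx-mmap-timeout-ms=0
 */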
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}
static void vfio_enable_intx_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_enable_intx_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}
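/*
 * For reference, a sketch of the variable-length buffer passed to
 * VFIO_DEVICE_SET_IRQS above; the single eventfd is appended to the header
 * as the data[] payload:
 *
 *   +------------------------------------------+
 *   | argsz = sizeof(vfio_irq_set) + 4         |
 *   | flags = DATA_EVENTFD | ACTION_UNMASK     |
 *   | index = VFIO_PCI_INTX_IRQ_INDEX          |
 *   | start = 0, count = 1                     |
 *   | data  = { resamplefd (int32_t) }         |
 *   +------------------------------------------+
 */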
static void vfio_disable_intx_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_disable_intx_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_update_irq(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_update_irq(vdev->vbasedev.name,
                          vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(&vdev->vbasedev);
}
static int vfio_enable_intx(VFIOPCIDevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        g_free(irq_set);
        return -errno;
    }
    g_free(irq_set);

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_enable_intx(vdev->vbasedev.name);

    return 0;
}

static void vfio_disable_intx(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_disable_intx(vdev->vbasedev.name);
}
/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

#ifdef DEBUG_VFIO
    MSIMessage msg;

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msg = msix_get_message(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msg = msi_get_message(&vdev->pdev, nr);
    } else {
        abort();
    }

    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
#endif

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
static void vfio_add_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage *msg,
                                  bool msix)
{
    int virq;

    if ((msix && !VFIO_ALLOW_KVM_MSIX) ||
        (!msix && !VFIO_ALLOW_KVM_MSI) || !msg) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, *msg);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
                                       NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
                                      vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
}
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg);
        }
    } else {
        vfio_add_kvm_msi_virq(vector, msg, true);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}
static void vfio_enable_msix(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_enable_msix(vdev->vbasedev.name);
}

static void vfio_enable_msi(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg = msi_get_message(&vdev->pdev, i);

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vector, &msg, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_enable_msi(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_disable_msi_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}

static void vfio_disable_msix(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    trace_vfio_disable_msix(vdev->vbasedev.name);
}

static void vfio_disable_msi(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_disable_msi_common(vdev);

    trace_vfio_disable_msi(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg);
    }
}
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info reg_info = {
        .argsz = sizeof(reg_info),
        .index = VFIO_PCI_ROM_REGION_INDEX
    };
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info.size,
                            (unsigned long)reg_info.offset,
                            (unsigned long)reg_info.flags);

    vdev->rom_size = size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    /* The ROM is read-only; guest writes are silently dropped */
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static bool vfio_blacklist_opt_rom(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t vendor_id, device_id;
    int count = 0;

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);

    while (count < ARRAY_SIZE(romblacklist)) {
        if (romblacklist[count].vendor_id == vendor_id &&
            romblacklist[count].device_id == device_id) {
            return true;
        }
        count++;
    }

    return false;
}
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char name[32];
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning : Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%04x:%02x:%02x.%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning : Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified non zero value for "
                         "rombar\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        } else {
            error_printf("Warning : Rom loading for device at "
                         "%04x:%02x:%02x.%x has been disabled due to "
                         "system instability issues. "
                         "Specify rombar=1 or romfile to force\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}
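/*
 * Worked example of the BAR sizing dance above: writing all 1s to the ROM
 * BAR and reading back, say, 0xfffe0000 (low bits masked off by the device)
 * gives size = ~(0xfffe0000 & PCI_ROM_ADDRESS_MASK) + 1 = 0x20000, i.e. a
 * 128KB ROM BAR.  (Value is illustrative.)
 */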
static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2) {
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}
static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        trace_vfio_generic_window_quirk_read(memory_region_name(&quirk->mem),
                                             vdev->vbasedev.name,
                                             quirk->data.bar,
                                             addr, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + quirk->data.base_offset, size);
    }

    return data;
}

static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        trace_vfio_generic_window_quirk_write(memory_region_name(&quirk->mem),
                                              vdev->vbasedev.name,
                                              quirk->data.bar,
                                              addr, data, size);
        return;
    }

    vfio_region_write(&vdev->bars[quirk->data.bar].region,
                      addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
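/*
 * Illustrative access sequence handled by the window quirk above, using the
 * settings of the ATI BAR4 probe below (address register at +0x0, data
 * register at +0x4, config mirror matched at 0x4000, 4k mask):
 *
 *   write BAR4+0x0, 0x4004   -> matches, window enabled, address_val = 4
 *   read  BAR4+0x4           -> returns config space dword at offset 4
 *   write BAR4+0x0, 0x8000   -> no match, window disabled
 *   read  BAR4+0x4           -> passed through to the device
 */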
static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        trace_vfio_generic_quirk_read(memory_region_name(&quirk->mem),
                                      vdev->vbasedev.name, quirk->data.bar,
                                      addr + base, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + base, size);
    }

    return data;
}

static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        trace_vfio_generic_quirk_write(memory_region_name(&quirk->mem),
                                       vdev->vbasedev.name, quirk->data.bar,
                                       addr + base, data, size);
    } else {
        vfio_region_write(&vdev->bars[quirk->data.bar].region,
                          addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    trace_vfio_ati_3c3_quirk_read(data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_vga_probe_ati_3c3_quirk(vdev->vbasedev.name);
}
/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access is provided, but we nonetheless drop writes
 * while the window is enabled to config space.
 */
static void vfio_probe_ati_bar4_window_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        quirk->data.base_offset, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_ati_bar4_window_quirk(vdev->vbasedev.name);
}
#define PCI_VENDOR_ID_REALTEK 0x10ec

/*
 * RTL8168 devices have a backdoor that can access the MSI-X table.  At BAR2
 * offset 0x70 there is a dword data register, offset 0x74 is a dword address
 * register.  According to the Linux r8169 driver, the MSI-X table is addressed
 * when the "type" portion of the address register is set to 0x1.  This appears
 * to be bits 16:30.  Bit 31 is both a write indicator and some sort of
 * "address latched" indicator.  Bits 12:15 are a mask field, which we can
 * ignore because the MSI-X table should always be accessed as a dword (full
 * mask).  Bits 0:11 is offset within the type.
 *
 * Example trace:
 *
 * Read from MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
 *
 * Write 0xfee00000 to MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
 */
static uint64_t vfio_rtl8168_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    switch (addr) {
    case 4: /* address */
        if (quirk->data.flags) {
            trace_vfio_rtl8168_window_quirk_read_fake(
                    memory_region_name(&quirk->mem),
                    vdev->vbasedev.name);

            return quirk->data.address_match ^ 0x10000000U;
        }
        break;
    case 0: /* data */
        if (quirk->data.flags) {
            uint64_t val;

            trace_vfio_rtl8168_window_quirk_read_table(
                    memory_region_name(&quirk->mem),
                    vdev->vbasedev.name);

            if (!(vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
                return 0;
            }

            io_mem_read(&vdev->pdev.msix_table_mmio,
                        (hwaddr)(quirk->data.address_match & 0xfff),
                        &val, size);
            return val;
        }
    }

    trace_vfio_rtl8168_window_quirk_read_direct(memory_region_name(&quirk->mem),
                                                vdev->vbasedev.name);

    return vfio_region_read(&vdev->bars[quirk->data.bar].region,
                            addr + 0x70, size);
}

static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    switch (addr) {
    case 4: /* address */
        if ((data & 0x7fff0000) == 0x10000) {
            if (data & 0x10000000U &&
                vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {

                trace_vfio_rtl8168_window_quirk_write_table(
                        memory_region_name(&quirk->mem),
                        vdev->vbasedev.name);

                io_mem_write(&vdev->pdev.msix_table_mmio,
                             (hwaddr)(quirk->data.address_match & 0xfff),
                             data, size);
            }

            quirk->data.flags = 1;
            quirk->data.address_match = data;

            return;
        }
        quirk->data.flags = 0;
        break;
    case 0: /* data */
        quirk->data.address_mask = data;
        break;
    }

    trace_vfio_rtl8168_window_quirk_write_direct(
            memory_region_name(&quirk->mem),
            vdev->vbasedev.name);

    vfio_region_write(&vdev->bars[quirk->data.bar].region,
                      addr + 0x70, data, size);
}

static const MemoryRegionOps vfio_rtl8168_window_quirk = {
    .read = vfio_rtl8168_window_quirk_read,
    .write = vfio_rtl8168_window_quirk_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_rtl8168_bar2_window_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_REALTEK ||
        pci_get_word(pdev->config + PCI_DEVICE_ID) != 0x8168 || nr != 2) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_rtl8168_window_quirk,
                          quirk, "vfio-rtl8168-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        0x70, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_rtl8168_bar2_window_quirk(vdev->vbasedev.name);
}
/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                          quirk->data.address_match & TARGET_PAGE_MASK,
                          &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_ati_bar2_4000_quirk(vdev->vbasedev.name);
}
/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
 */
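/*
 * The access sequence, as seen from the guest (illustrative; ports are the
 * legacy VGA I/O registers named above):
 *
 *   outw(0x3d4, 0x338)          select the config space window
 *   outw(0x3d0, 0x1800 + off)   latch the target offset
 *   outw(0x3d4, 0x538)          arm for read (0x738 arms for write)
 *   inl(0x3d0)                  returns the config space data
 */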
enum {
    NV_3D0_NONE = 0,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};

static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + quirk->data.base_offset, size);

    if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
        data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
        trace_vfio_nvidia_3d0_quirk_read(size, data);
    }

    quirk->data.flags = NV_3D0_NONE;

    return data;
}

static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data.flags) {
    case NV_3D0_NONE:
        if (addr == quirk->data.address_offset && data == 0x338) {
            quirk->data.flags = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset &&
            (data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags = NV_3D0_WINDOW;
            quirk->data.address_val = data & quirk->data.address_mask;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.address_offset) {
            if (data == 0x538) {
                quirk->data.flags = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data.flags = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset) {
            vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
            trace_vfio_nvidia_3d0_quirk_write(data, size);
            return;
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].region.size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.base_offset = 0x10;
    quirk->data.address_offset = 4;
    quirk->data.address_size = 2;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.data_offset = 0;
    quirk->data.data_size = 4;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          quirk, "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                quirk->data.base_offset, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_vga_probe_nvidia_3d0_quirk(vdev->vbasedev.name);
}
/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
 * space; including extended space, it is available in the 4k window @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};

static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;

    switch (addr) {
    case 0x0:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_MASTER;
        } else {
            quirk->data.flags &= ~NV_BAR5_MASTER;
        }
        break;
    case 0x4:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_ENABLE;
        } else {
            quirk->data.flags &= ~NV_BAR5_ENABLE;
        }
        break;
    case 0x8:
        if (quirk->data.flags & NV_BAR5_MASTER) {
            if ((data & ~0xfff) == 0x88000) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xfff;
            } else if ((data & ~0xff) == 0x1800) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xff;
            } else {
                quirk->data.flags &= ~NV_BAR5_ADDRESS;
            }
        }
        break;
    }

    vfio_generic_window_quirk_write(opaque, addr, data, size);
}

static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_nvidia_bar5_window_quirk_write,
    .valid.min_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar5_window_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
    quirk->data.address_offset = 0x8;
    quirk->data.address_size = 0; /* actually 4, but avoids generic code */
    quirk->data.data_offset = 0xc;
    quirk->data.data_size = 4;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_nvidia_bar5_window_quirk, quirk,
                          "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_nvidia_bar5_window_quirk(vdev->vbasedev.name);
}
static void vfio_nvidia_88000_quirk_write(void *opaque, hwaddr addr,
                                          uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;

    vfio_generic_quirk_write(opaque, addr, data, size);

    /*
     * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
     * MSI capability ID register.  Both the ID and next register are
     * read-only, so we allow writes covering either of those to real hw.
     * NB - only fixed for the 0x88000 MMIO window.
     */
    if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
        vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
        vfio_region_write(&vdev->bars[quirk->data.bar].region,
                          addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_nvidia_88000_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_nvidia_88000_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirk at a page granularity or else they don't seem to work when
 *      BARs are mmap'd
 *
 * Here's offset 0x88000...
 */
static void vfio_probe_nvidia_bar0_88000_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;
    uint16_t vendor, class;

    vendor = pci_get_word(pdev->config + PCI_VENDOR_ID);
    class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

    if (nr != 0 || vendor != PCI_VENDOR_ID_NVIDIA ||
        class != PCI_CLASS_DISPLAY_VGA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x88000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_88000_quirk,
                          quirk, "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                          quirk->data.address_match & TARGET_PAGE_MASK,
                          &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_nvidia_bar0_88000_quirk(vdev->vbasedev.name);
}

/*
 * And here's the same for BAR0 offset 0x1800...
 */
static void vfio_probe_nvidia_bar0_1800_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    /* Log the chipset ID */
    trace_vfio_probe_nvidia_bar0_1800_quirk_id(
            (unsigned int)(vfio_region_read(&vdev->bars[0].region, 0, 4) >> 20)
            & 0xff);

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-nvidia-bar0-1800-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                          quirk->data.address_match & TARGET_PAGE_MASK,
                          &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_nvidia_bar0_1800_quirk(vdev->vbasedev.name);
}
/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

/*
 * Common quirk probe entry points.
 */
static void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}

static void vfio_vga_quirk_teardown(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        QLIST_FOREACH(quirk, &vdev->vga.region[i].quirks, next) {
            memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
        }
    }
}

static void vfio_vga_quirk_free(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            object_unparent(OBJECT(&quirk->mem));
            QLIST_REMOVE(quirk, next);
            g_free(quirk);
        }
    }
}

static void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
{
    vfio_probe_ati_bar4_window_quirk(vdev, nr);
    vfio_probe_ati_bar2_4000_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
    vfio_probe_rtl8168_bar2_window_quirk(vdev, nr);
}

static void vfio_bar_quirk_teardown(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    VFIOQuirk *quirk;

    QLIST_FOREACH(quirk, &bar->quirks, next) {
        memory_region_del_subregion(&bar->region.mem, &quirk->mem);
    }
}

static void vfio_bar_quirk_free(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
        object_unparent(OBJECT(&quirk->mem));
        QLIST_REMOVE(quirk, next);
        g_free(quirk);
    }
}
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}
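
/*
 * Worked example of the merge above (values are made up): with len = 2
 * and emulated_config_bits covering only the low byte, emu_bits =
 * 0x00ff.  If QEMU's copy reads 0x1234 and the physical device returns
 * 0x5678, the guest sees (0x1234 & 0x00ff) | (0x5678 & ~0x00ff) = 0x5634
 * -- emulated bits win where set, hardware bits everywhere else.
 */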
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
        != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_enable_msi(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_disable_msi(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
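
/*
 * Illustrative flow (hypothetical guest write): a 16-bit write setting
 * bit 0 of the MSI Message Control register falls inside
 * [msi_cap, msi_cap + msi_cap_size), so the branch above samples
 * msi_enabled() before and after pci_default_write_config(); the
 * 0 -> 1 transition of the enable bit then calls vfio_enable_msi().
 */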
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_disable_msix(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_disable_msi(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_disable_intx(vdev);
    }
}
static int vfio_setup_msi(VFIOPCIDevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_setup_msi(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
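
/*
 * Example decode (hypothetical ctrl value): ctrl = 0x0182 has
 * PCI_MSI_FLAGS_MASKBIT (0x0100) and PCI_MSI_FLAGS_64BIT (0x0080) set,
 * and ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1) = 1, so entries = 1 << 1 = 2.
 * The capability then spans 0xa + 0xa + 0x4 = 0x18 bytes of config space.
 */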
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIOPCIDevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    trace_vfio_early_setup_msix(vdev->vbasedev.name, pos,
                                vdev->msix->table_bar,
                                vdev->msix->table_offset,
                                vdev->msix->entries);

    return 0;
}
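
/*
 * Example decode (hypothetical register values): table = 0x00002003
 * splits into table_bar = 3 and table_offset = 0x2000, pba = 0x00003003
 * into pba_bar = 3 and pba_offset = 0x3000, and ctrl = 0x003f gives
 * entries = 0x3f + 1 = 64 vectors.
 */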
static int vfio_setup_msix(VFIOPCIDevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    return 0;
}
static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    &vdev->bars[vdev->msix->table_bar].region.mem,
                    &vdev->bars[vdev->msix->pba_bar].region.mem);
    }
}
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->region.size) {
            continue;
        }

        memory_region_set_enabled(&bar->region.mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}
static void vfio_unregister_bar(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->region.size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->region.mem, &bar->region.mmap_mem);

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->region.mem, &vdev->msix->mmap_mem);
    }
}
static void vfio_unmap_bar(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->region.size) {
        return;
    }

    vfio_bar_quirk_free(vdev, nr);

    munmap(bar->region.mmap, memory_region_size(&bar->region.mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }
}
static void vfio_map_bar(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    uint64_t size = bar->region.size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops,
                          bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & qemu_host_page_mask;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
                         &bar->region.mmap_mem, &bar->region.mmap,
                         size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        uint64_t start;

        start = HOST_PAGE_ALIGN(vdev->msix->table_offset +
                                (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->region.size ? bar->region.size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
                             &vdev->msix->mmap_mem,
                             &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow", name);
        }
    }

    vfio_bar_quirk_setup(vdev, nr);
}
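
/*
 * Layout sketch for a BAR containing an MSI-X table (offsets are
 * illustrative): with a 64KB BAR and the table at 0x8000,
 *
 *   0x0000 +--------------------+ <- "BAR mmap" subregion (direct-mapped)
 *          |       mmap'd       |
 *   0x8000 +--------------------+ <- MSI-X table/PBA pages: only the
 *          |   table (slow)     |    slow read/write region covers this
 *   0x9000 +--------------------+ <- "msix-hi" subregion (direct-mapped)
 *          |       mmap'd       |
 *   0xffff +--------------------+
 */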
static void vfio_map_bars(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }

    if (vdev->has_vga) {
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);

        pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);

        vfio_vga_quirk_setup(vdev);
    }
}
static void vfio_unregister_bars(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unregister_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_teardown(vdev);
        pci_unregister_vga(&vdev->pdev);
    }
}
static void vfio_unmap_bars(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_free(vdev);
    }
}
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
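
/*
 * Example (hypothetical chain): with capabilities at 0x40, 0x50 and
 * 0x60, vfio_std_cap_max_size(pdev, 0x40) finds 0x50 as the smallest
 * offset above pos and returns 0x50 - 0x40 = 0x10 bytes.  For the
 * highest capability, next remains 0xff and the size runs to the end
 * of the 256-byte standard config space.
 */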
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
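
/*
 * Net effect of vfio_add_emulated_word/long (summary, not new behavior):
 * for a given (val, mask) the three writes leave
 *   - pdev->config: val under mask (the value the guest reads),
 *   - pdev->wmask: bits under mask cleared, so guest writes to them
 *     are dropped by pci_default_write_config(),
 *   - emulated_config_bits: bits under mask set, steering
 *     vfio_pci_read_config() to the QEMU copy instead of the device.
 */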
static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus.  It doesn't make much
         * sense to even expose, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it.  We'll need
         * to revisit this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}
static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}
static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}
static int vfio_add_capabilities(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    vfio_enable_intx(vdev);
}

static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
                                PCIHostDeviceAddress *host2)
{
    return (host1->domain == host2->domain && host1->bus == host2->bus &&
            host1->slot == host2->slot && host1->function == host2->function);
}
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    vfio_pci_pre_reset(vdev);
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
                         "no available reset mechanism.", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, &vdev->host)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, &tmp->host)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, &vdev->host)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, &tmp->host)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    vfio_pci_post_reset(vdev);
    g_free(info);

    return ret;
}
/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use devices case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}
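
/*
 * Illustrative scenario (hypothetical topology, not from the code): a
 * dual-function GPU (01:00.0 VGA + 01:00.1 audio) behind one bridge.
 * With only 01:00.0 assigned to this VM, a bus reset affects multiple
 * devices but just one in-use VFIODevice, so _one() succeeds from the
 * ->reset() callback.  With both functions assigned, _one() bails out
 * with -EINVAL and only the system-reset path, via _multi(), performs
 * the bus reset.
 */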
static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_eoi,
};
static int vfio_populate_device(VFIOPCIDevice *vdev)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret = -1;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     vbasedev->num_regions);
        goto error;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        trace_vfio_populate_device_region(vbasedev->name, i,
                                          (unsigned long)reg_info.size,
                                          (unsigned long)reg_info.offset,
                                          (unsigned long)reg_info.flags);

        vdev->bars[i].region.vbasedev = vbasedev;
        vdev->bars[i].region.flags = reg_info.flags;
        vdev->bars[i].region.size = reg_info.size;
        vdev->bars[i].region.fd_offset = reg_info.offset;
        vdev->bars[i].region.nr = i;
        QLIST_INIT(&vdev->bars[i].quirks);
    }

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info.size,
                                      (unsigned long)reg_info.offset,
                                      (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        vbasedev->num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        struct vfio_region_info vga_info = {
            .argsz = sizeof(vga_info),
            .index = VFIO_PCI_VGA_REGION_INDEX,
        };

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            vga_info.size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)vga_info.flags,
                         (unsigned long)vga_info.size);
            goto error;
        }

        vdev->vga.fd_offset = vga_info.offset;
        vdev->vga.fd = vdev->vbasedev.fd;

        vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);

        vdev->has_vga = true;
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure();
        ret = 0;
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        error_report("vfio: %s "
                     "Could not enable error recovery for the device",
                     vbasedev->name);
    }

error:
    return ret;
}
static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    if (vdev->msix) {
        object_unparent(OBJECT(&vdev->msix->mmap_mem));
        g_free(vdev->msix);
        vdev->msix = NULL;
    }
    vfio_put_base_device(&vdev->vbasedev);
}
static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. "
                 "Please collect any data possible and then kill the guest",
                 __func__, vdev->host.domain, vdev->host.bus,
                 vdev->host.slot, vdev->host.function);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}
/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    int ret;
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to set up error notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
    g_free(irq_set);
}
static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    int ret;

    if (!vdev->pci_aer) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Failed to de-assign error fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}
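
/*
 * Sketch of the VFIO_DEVICE_SET_IRQS pattern used above and below
 * (illustrative summary of the code in this file, not a full ABI
 * walkthrough): the variable-length struct carries one eventfd per
 * vector, and re-issuing the ioctl with an fd of -1 disarms the trigger.
 *
 *   struct vfio_irq_set *set = g_malloc0(sizeof(*set) + sizeof(int32_t));
 *   set->argsz = sizeof(*set) + sizeof(int32_t);
 *   set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *   set->index = VFIO_PCI_ERR_IRQ_INDEX;   // or the MSI/MSIX/REQ index
 *   set->start = 0;
 *   set->count = 1;
 *   *(int32_t *)&set->data = fd;           // -1 to tear down
 *   ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *   g_free(set);
 */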
static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(&vdev->pdev.qdev, NULL);
}
static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to set up device request notification");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }

    g_free(irq_set);
}
static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!vdev->req_enabled) {
        return;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1;

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        error_report("vfio: Failed to de-assign device request fd: %m");
    }
    g_free(irq_set);
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}
static int vfio_initfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    vdev->vbasedev.ops = &vfio_pci_ops;

    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.name = g_strdup_printf("%04x:%02x:%02x.%01x",
                                          vdev->host.domain, vdev->host.bus,
                                          vdev->host.slot, vdev->host.function);

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, sizeof(path));
    if (len <= 0 || len >= sizeof(path)) {
        error_report("vfio: error no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    trace_vfio_initfn(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, &vdev->vbasedev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vdev);
    if (ret) {
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        return ret;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        return ret;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unregister_bars(vdev);
    return ret;
}
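
/*
 * Illustrative sysfs walk performed above (paths are examples, not from
 * a real host): for host=0000:01:00.0,
 *
 *   /sys/bus/pci/devices/0000:01:00.0/iommu_group
 *       -> ../../../../kernel/iommu_groups/26
 *
 * readlink() returns the link target, basename() yields "26", and
 * sscanf() parses groupid = 26, which vfio_get_group() then opens as
 * /dev/vfio/26.
 */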
static void vfio_instance_finalize(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    vfio_put_device(vdev);
    vfio_put_group(group);
}
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unregister_bars(vdev);
}
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}
static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
}
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_INT32("bootindex", VFIOPCIDevice, bootindex, -1),
    DEFINE_PROP_BOOL("x-mmap", VFIOPCIDevice, vbasedev.allow_mmap, true),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}
static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)