/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"

#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"
static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
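/*
 * Illustrative note (not from the original source): mmap_timeout is a
 * per-device property, so the trade-off described above can be tuned on
 * the QEMU command line, for example:
 *
 *   -device vfio-pci,host=0000:01:00.0,x-intx-mmap-timeout-ms=0
 *
 * where host= is a placeholder address; a value of 0 disables the
 * re-enable timer, as the comment above notes.
 */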
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}
static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &vdev->intx.interrupt,
                                           &vdev->intx.unmask,
                                           vdev->intx.route.irq)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_UNMASK,
                               event_notifier_get_fd(&vdev->intx.unmask),
                               errp)) {
        goto fail_vfio;
    }

    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                          vdev->intx.route.irq);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}
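/*
 * Summary note added for clarity (not in the original source): with KVM
 * acceleration the INTx path uses two eventfds.  vdev->intx.interrupt is
 * registered as a KVM irqfd so the host interrupt is injected into the
 * guest without exiting to QEMU, while vdev->intx.unmask is registered
 * both as the KVM resamplefd and as the VFIO ACTION_UNMASK eventfd, so a
 * guest EOI re-enables (unmasks) the host interrupt, again bypassing
 * QEMU user space.
 */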
static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                              vdev->intx.route.irq)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                        vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}
static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
{
    Error *err = NULL;

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route->irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = *route;

    if (route->mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}
static void vfio_intx_routing_notifier(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (pci_intx_route_changed(&vdev->intx.route, &route)) {
        vfio_intx_update(vdev, &route);
    }
}
static void vfio_irqchip_change(Notifier *notify, void *data)
{
    VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
                                       irqchip_change_notifier);

    vfio_intx_update(vdev, &vdev->intx.route);
}
static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    Error *err = NULL;
    int32_t fd;
    int ret;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }
    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}
static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int32_t fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
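/*
 * Illustrative sketch (not part of the original source): the variable-length
 * payload built above follows the layout defined by the VFIO UAPI in
 * <linux/vfio.h>, with one eventfd per vector appended after the header:
 *
 *   struct vfio_irq_set {
 *       __u32 argsz;    // total size including data[]
 *       __u32 flags;    // VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER
 *       __u32 index;    // VFIO_PCI_MSI_IRQ_INDEX or VFIO_PCI_MSIX_IRQ_INDEX
 *       __u32 start;    // first vector, 0 here
 *       __u32 count;    // nr_vectors
 *       __u8  data[];   // int32_t fds[count]; -1 skips/de-assigns a vector
 *   };
 */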
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}
static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}
static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        Error *err = NULL;
        int32_t fd;

        if (vector->virq >= 0) {
            fd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            fd = event_notifier_get_fd(&vector->interrupt);
        }

        if (vfio_set_irq_signaling(&vdev->vbasedev,
                                   VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}
static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int32_t fd = event_notifier_get_fd(&vector->interrupt);
        Error *err = NULL;

        if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }
}
static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}
static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);
        vdev->msi_vectors = NULL;

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}
static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}
static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}
static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}
static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}
static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}
static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified romfile\n");
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified"
                         " non zero value for rombar\n");
        } else {
            warn_report("Rom loading for device at %s has been disabled"
                        " due to system instability issues",
                        vdev->vbasedev.name);
            error_printf("Specify rombar=1 or romfile to force\n");
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->rom_read_failed = false;
}
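/*
 * Usage note added for clarity (not in the original source): the messages
 * above refer to standard PCI device properties, e.g.:
 *
 *   -device vfio-pci,host=0000:01:00.0,rombar=0        # skip ROM probing
 *   -device vfio-pci,host=0000:01:00.0,romfile=gpu.rom # supply ROM from file
 *
 * host= and the ROM file name are placeholders for illustration only.
 */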
void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}
uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}
static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
 * page size if the BAR occupies an exclusive page on the host, so that
 * the BAR can be mmap'd into the guest.  The sub-page BAR may not occupy
 * an exclusive page in the guest, however, so the expanded memory region
 * gets priority zero in case it overlaps BARs that share the same guest
 * page.  We also restore the original size of the sub-page BAR when its
 * guest base address changes and is no longer page aligned.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *region_mr, *base_mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    base_mr = vdev->bars[bar].mr;
    region_mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask)) {
        size = qemu_real_host_page_size;
    }

    memory_region_transaction_begin();

    if (vdev->bars[bar].size < size) {
        memory_region_set_size(base_mr, size);
    }
    memory_region_set_size(region_mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
        memory_region_del_subregion(r->address_space, base_mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, base_mr, 0);
    }

    memory_region_transaction_commit();
}
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}
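/*
 * Worked example added for clarity (not in the original source): for a
 * 2-byte read with emu_bits == 0x00ff, emu_val == 0x1234 and
 * phys_val == 0xabcd, the merge above yields
 *
 *   val = (0x1234 & 0x00ff) | (0xabcd & 0xff00) = 0xab34
 *
 * i.e. the low byte comes from QEMU's emulated config space and the high
 * byte from the physical device.
 */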
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
                != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
        range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                vdev->bars[bar].region.size > 0 &&
                vdev->bars[bar].region.size < qemu_real_host_page_size) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}
static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_propagate_prepend(errp, err, "msi_init failed: ");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * If the host driver allows mapping of the MSIX data, we are going to
     * map the entire BAR and emulate the MSIX table on top of that.
     */
    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
        return;
    }

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}
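/*
 * Worked example added for clarity (not in the original source): with 4KiB
 * host pages, a 64KiB BAR and an MSI-X table occupying [0x2800, 0x2c00),
 * start/end above round to 0x2000/0x3000, so the single mmap is split into
 * two: [0x0000, 0x2000) and [0x3000, 0x10000).  The page containing the
 * table itself is left un-mmap'd and is handled by trapped access.
 */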
static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
{
    int target_bar = -1;
    size_t msix_sz;

    if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
        return;
    }

    /* The actual minimum size of MSI-X structures */
    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
    /* Round up to host pages, we don't want to share a page */
    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
    /* PCI BARs must be a power of 2 */
    msix_sz = pow2ceil(msix_sz);

    if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
        /*
         * TODO: Lookup table for known devices.
         *
         * Logically we might use an algorithm here to select the BAR adding
         * the least additional MMIO space, but we cannot programmatically
         * predict the driver dependency on BAR ordering or sizing, therefore
         * 'auto' becomes a lookup for combinations reported to work.
         */
        if (target_bar < 0) {
            error_setg(errp, "No automatic MSI-X relocation available for "
                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
            return;
        }
    } else {
        target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
    }

    /* I/O port BARs cannot host MSI-X structures */
    if (vdev->bars[target_bar].ioport) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "I/O port BAR", target_bar);
        return;
    }

    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
    if (!vdev->bars[target_bar].size &&
         target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
        return;
    }

    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
    if (vdev->bars[target_bar].size > 1 * GiB &&
        !vdev->bars[target_bar].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "no space to extend 32-bit BAR", target_bar);
        return;
    }

    /*
     * If adding a new BAR, test if we can make it 64bit.  We make it
     * prefetchable since QEMU MSI-X emulation has no read side effects
     * and doing so makes mapping more flexible.
     */
    if (!vdev->bars[target_bar].size) {
        if (target_bar < (PCI_ROM_SLOT - 1) &&
            !vdev->bars[target_bar + 1].size) {
            vdev->bars[target_bar].mem64 = true;
            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
        }
        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
        vdev->bars[target_bar].size = msix_sz;
        vdev->msix->table_offset = 0;
    } else {
        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
                                          msix_sz * 2);
        /*
         * Due to above size calc, MSI-X always starts halfway into the BAR,
         * which will always be a separate host page.
         */
        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
    }

    vdev->msix->table_bar = target_bar;
    vdev->msix->pba_bar = target_bar;
    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
    vdev->msix->pba_offset = vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);

    trace_vfio_msix_relo(vdev->vbasedev.name,
                         vdev->msix->table_bar, vdev->msix->table_offset);
}
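/*
 * Usage note added for clarity (not in the original source): msix_relo is
 * typically exposed as the experimental x-msix-relocation device property,
 * e.g.:
 *
 *   -device vfio-pci,host=0000:01:00.0,x-msix-relocation=bar2
 *
 * accepting off, auto, or bar0..bar5; host= is a placeholder.  As the code
 * above notes, "auto" currently has no lookup table and therefore fails
 * unless a specific BAR is requested.
 */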
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    vfio_pci_relocate_msix(vdev, errp);
}
static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].mr,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    /*
     * The emulated machine may provide a paravirt interface for MSIX setup
     * so it is not strictly necessary to emulate MSIX here. This becomes
     * helpful when frequently accessed MMIO registers are located in
     * subpages adjacent to the MSIX table but the MSIX data containing page
     * cannot be mapped because of a host page size bigger than the MSIX table
     * alignment.
     */
    if (object_property_get_bool(OBJECT(qdev_get_machine()),
                                 "vfio-no-msix-emulation", NULL)) {
        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
    }

    return 0;
}
static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->bars[vdev->msix->pba_bar].mr);
        g_free(vdev->msix->pending);
    }
}
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}
static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    uint32_t pci_bar;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                         ~PCI_BASE_ADDRESS_MEM_MASK);
    bar->size = bar->region.size;
}
static void vfio_bars_prepare(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_prepare(vdev, i);
    }
}
static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    char *name;

    if (!bar->size) {
        return;
    }

    bar->mr = g_new0(MemoryRegion, 1);
    name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
    memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
    g_free(name);

    if (bar->region.size) {
        memory_region_add_subregion(bar->mr, 0, bar->region.mem);

        if (vfio_region_mmap(&bar->region)) {
            error_report("Failed to mmap %s BAR %d. Performance may be slow",
                         vdev->vbasedev.name, nr);
        }
    }

    pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
}
static void vfio_bars_register(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_register(vdev, i);
    }
}
static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&bar->region);
        if (bar->region.size) {
            memory_region_del_subregion(bar->mr, bar->region.mem);
        }
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}
static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&bar->region);
        if (bar->size) {
            object_unparent(OBJECT(bar->mr));
            g_free(bar->mr);
        }
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
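/*
 * Descriptive note added for clarity (not in the original source): each
 * helper above touches three parallel views of config space: pdev.config
 * holds the emulated value, pdev.wmask marks which of those bits the guest
 * may write, and emulated_config_bits marks which bits are served from
 * QEMU rather than from the physical device (see the merge in
 * vfio_pci_read_config()).
 */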
static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
                               Error **errp)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_setg(errp, "assignment of PCIe type 0x%x "
                   "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
        PCIBus *bus = pci_get_bus(&vdev->pdev);
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = pci_get_bus(bridge);
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                           QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                           QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }
    }

    /*
     * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
     * (Niantic errata #35) causing Windows to error with a Code 10 for the
     * device on Q35.  Fixup any such devices to report version 1.  If we
     * were to remove the capability entirely the guest would lose extended
     * config space.
     */
    if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
        vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                               1, PCI_EXP_FLAGS_VERS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
                             errp);
    if (pos < 0) {
        return pos;
    }

    vdev->pdev.exp.exp_cap = pos;

    return pos;
}
static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}
static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}
static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}
static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next, errp);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;

        ret = vfio_add_virt_caps(vdev, errp);
        if (ret) {
            return ret;
        }
    }

    /* Scale down size, esp in case virt caps were added above */
    size = MIN(size, vfio_std_cap_max_size(pdev, pos));

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    }

    if (ret < 0) {
        error_prepend(errp,
                      "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
                      cap_id, size, pos);
        return ret;
    }

    return 0;
}
static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer.  Seed the head of the chain here such that
     * we can simply skip any capabilities we want to drop below, regardless
     * of their position in the chain.  If this stub capability still exists
     * after we add the capabilities we want to expose, update the capability
     * ID to zero.  Note that we cannot seed with the capability header being
     * zero as this conflicts with definition of an absent capability chain
     * and prevents capabilities beyond the head of the list from being added.
     * By replacing the dummy capability ID with zero after walking the device
     * chain, we also transparently mark extended capabilities as absent if
     * no capabilities were added.  Note that the PCIe spec defines an absence
     * of extended capabilities to be determined by a value of zero for the
     * capability ID, version, AND next pointer.  A non-zero next pointer
     * should be sufficient to indicate additional capabilities are present,
     * which will occur if we call pcie_add_capability() below.  The entire
     * first dword is emulated to support this.
     *
     * NB. The kernel side does similar masking, so be prepared that our
     * view of the device may also contain a capability ID zero in the head
     * of the chain.  Skip it for the same reason that we cannot seed the
     * chain with a zero capability.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize. Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case 0: /* kernel masked capability */
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
        case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }

    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
}
static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
    if (ret) {
        return ret;
    }

    vfio_add_ext_cap(vdev);
    return 0;
}

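/*
 * vfio_pci_pre_reset() and vfio_pci_post_reset() below bracket any reset we
 * issue: pre_reset quiesces the device (interrupts disabled, forced to D0,
 * I/O, MMIO and bus master turned off), while post_reset restores INTx,
 * clears residual host BAR values from the vfio config space and re-applies
 * any reset quirks.
 */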
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}

static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int nr;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
        uint32_t val = 0;
        uint32_t len = sizeof(val);

        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
            error_report("%s(%s) reset bar %d failed: %m", __func__,
                         vdev->vbasedev.name, nr);
        }
    }

    vfio_quirk_reset(vdev);
}

static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}

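/*
 * Hot reset uses the usual vfio "argsz" negotiation for variable-length
 * ioctls: VFIO_DEVICE_GET_PCI_HOT_RESET_INFO is called once with a bare
 * header to learn how many dependent devices there are, the buffer is then
 * grown to sizeof(*info) + count * sizeof(*devices), and the ioctl is issued
 * again to retrieve the full device list.  Roughly:
 *
 *   info->argsz = sizeof(*info);                         // first call
 *   ioctl(fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); // reports count
 *   info->argsz = sizeof(*info) + count * sizeof(*devices);
 *   ioctl(fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); // second call
 */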
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use devices case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}

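/*
 * vfio_pci_hot_reset_one() is called from vfio_pci_reset(), the per-device
 * ->reset() path, while vfio_pci_hot_reset_multi() is wired up below as
 * vfio_pci_ops.vfio_hot_reset_multi and only runs from the VFIO reset
 * handler on a system reset, per the scheme described above.
 */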
static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static Object *vfio_pci_get_object(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return OBJECT(vdev);
}

static bool vfio_msix_present(void *opaque, int version_id)
{
    PCIDevice *pdev = opaque;

    return msix_present(pdev);
}

const VMStateDescription vmstate_vfio_pci_config = {
    .name = "VFIOPCIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice),
        VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present),
        VMSTATE_END_OF_LIST()
    }
};

static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL);
}

static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
    if (ret) {
        return ret;
    }

    vfio_pci_write_config(pdev, PCI_COMMAND,
                          pci_get_word(pdev->config + PCI_COMMAND), 2);

    if (msi_enabled(pdev)) {
        vfio_msi_enable(vdev);
    } else if (msix_enabled(pdev)) {
        vfio_msix_enable(vdev);
    }

    return ret;
}

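/*
 * For migration, the PCI config space and MSI-X state are saved and restored
 * through vmstate_vfio_pci_config above.  On load, re-writing PCI_COMMAND and
 * re-enabling MSI or MSI-X through the vfio config write path replays the
 * guest's interrupt setup onto the host device.
 */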
static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
    .vfio_get_object = vfio_pci_get_object,
    .vfio_save_config = vfio_pci_save_config,
    .vfio_load_config = vfio_pci_load_config,
};

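/*
 * The VGA region (VFIO_PCI_VGA_REGION_INDEX) is a single region covering the
 * legacy VGA ranges at fixed offsets: MMIO at 0xa0000 and I/O ports at 0x3b0
 * and 0x3c0.  That is why the check below requires at least 0xbffff + 1 bytes
 * and why each sub-range is carved out with its own MemoryRegion.
 */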
int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    int ret;

    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "failed getting region info for VGA region index %d",
                         VFIO_PCI_VGA_REGION_INDEX);
        return ret;
    }

    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
        reg_info->size < 0xbffff + 1) {
        error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
                   (unsigned long)reg_info->flags,
                   (unsigned long)reg_info->size);
        g_free(reg_info);
        return -EINVAL;
    }

    vdev->vga = g_new0(VFIOVGA, 1);

    vdev->vga->fd_offset = reg_info->offset;
    vdev->vga->fd = vdev->vbasedev.fd;

    g_free(reg_info);

    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
                          "vfio-vga-mmio@0xa0000",
                          QEMU_PCI_VGA_MEM_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                          "vfio-vga-io@0x3b0",
                          QEMU_PCI_VGA_IO_LO_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                          "vfio-vga-io@0x3c0",
                          QEMU_PCI_VGA_IO_HI_SIZE);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);

    return 0;
}

static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret = -1;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_setg(errp, "this isn't a PCI device");
        return;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "unexpected number of io regions %u",
                   vbasedev->num_regions);
        return;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
        return;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            return;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to get config info");
        return;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    g_free(reg_info);

    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
        ret = vfio_populate_vga(vdev, errp);
        if (ret) {
            error_append_hint(errp, "device does not support "
                              "requested feature x-vga\n");
            return;
        }
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        warn_report(VFIO_MSG_PREFIX
                    "Could not enable error recovery for the device",
                    vbasedev->name);
    }
}

static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    g_free(vdev->msix);

    vfio_put_base_device(&vdev->vbasedev);
}

static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int32_t fd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    fd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
}

static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->pci_aer) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(DEVICE(vdev), &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

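/*
 * The request notifier (VFIO_PCI_REQ_IRQ_INDEX) is signaled by the host when
 * it wants the device back, e.g. on a host driver unbind.  The handler above
 * responds by requesting a hot-unplug of the device from the guest.
 */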
static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    Error *err = NULL;
    int32_t fd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    fd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }
}

static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->req_enabled) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}

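/*
 * Realize path, roughly: resolve the sysfs device and its IOMMU group, open
 * the group and device, read the device's config space, set up the emulated
 * config bits, BARs, capabilities and quirks, then enable INTx and the
 * optional display, error and request notifiers.  Error paths unwind through
 * the out_deregister/out_teardown/error labels below.
 */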
static void vfio_realize(PCIDevice *pdev, Error **errp)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, *subsys, group_path[PATH_MAX], *group_name;
    Error *err = NULL;
    ssize_t len;
    struct stat st;
    int groupid;
    int i, ret;
    bool is_mdev;

    if (!vdev->vbasedev.sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }

    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno, "no such host device");
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev);
        return;
    }

    vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.dev = DEVICE(vdev);

    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
                         "no iommu_group found");
        goto error;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        goto error;
    }

    trace_vfio_realize(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
    if (!group) {
        goto error;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            goto error;
        }
    }

    /*
     * Mediated devices *might* operate compatibly with discarding of RAM, but
     * we cannot know for certain, it depends on whether the mdev vendor driver
     * stays in sync with the active working set of the guest driver.  Prevent
     * the x-balloon-allowed option unless this is minimally an mdev device.
     */
    tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
    subsys = realpath(tmp, NULL);
    g_free(tmp);
    is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
    free(subsys);

    trace_vfio_mdev(vdev->vbasedev.name, is_mdev);

    if (vdev->vbasedev.ram_block_discard_allowed && !is_mdev) {
        error_setg(errp, "x-balloon-allowed only potentially compatible "
                   "with mdev devices");
        vfio_put_group(group);
        goto error;
    }

    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        goto error;
    }

    vfio_populate_device(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_setg_errno(errp, -ret, "failed to read device config space");
        goto error;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
    /* QEMU can also add or extend BARs */
    memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    vfio_bars_prepare(vdev);

    vfio_msix_early_setup(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    vfio_bars_register(vdev);

    ret = vfio_add_capabilities(vdev, errp);
    if (ret) {
        goto out_teardown;
    }

    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }

    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        struct vfio_region_info *opregion;

        if (vdev->pdev.qdev.hotplugged) {
            error_setg(errp,
                       "cannot support IGD OpRegion feature on hotplugged "
                       "device");
            goto out_teardown;
        }

        ret = vfio_get_dev_region_info(&vdev->vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "does not support requested IGD OpRegion feature");
            goto out_teardown;
        }

        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
        g_free(opregion);
        if (ret) {
            goto out_teardown;
        }
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev,
                                             vfio_intx_routing_notifier);
        vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
        kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
        ret = vfio_intx_enable(vdev, errp);
        if (ret) {
            goto out_deregister;
        }
    }

    if (vdev->display != ON_OFF_AUTO_OFF) {
        ret = vfio_display_probe(vdev, errp);
        if (ret) {
            goto out_deregister;
        }
    }
    if (vdev->enable_ramfb && vdev->dpy == NULL) {
        error_setg(errp, "ramfb=on requires display=on");
        goto out_deregister;
    }
    if (vdev->display_xres || vdev->display_yres) {
        if (vdev->dpy == NULL) {
            error_setg(errp, "xres and yres properties require display=on");
            goto out_deregister;
        }
        if (vdev->dpy->edid_regs == NULL) {
            error_setg(errp, "xres and yres properties need edid support");
            goto out_deregister;
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
        ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVIDIA V100 GPU RAM");
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
        ret = vfio_pci_nvlink2_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVlink2 bridge");
        }
    }

    if (!pdev->failover_pair_id) {
        ret = vfio_migration_probe(&vdev->vbasedev, errp);
        if (ret) {
            error_report("%s: Migration disabled", vdev->vbasedev.name);
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return;

out_deregister:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
out_teardown:
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
error:
    error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
}

static void vfio_instance_finalize(Object *obj)
{
    VFIOPCIDevice *vdev = VFIO_PCI(obj);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_display_finalize(vdev);
    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    /*
     * XXX Leaking igd_opregion is not an oversight, we can't remove the
     * fw_cfg entry therefore leaking this allocation seems like the safest
     * option.
     *
     * g_free(vdev->igd_opregion);
     */
    vfio_put_device(vdev);
    vfio_put_group(group);
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    if (vdev->irqchip_change_notifier.notify) {
        kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
    }
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
    vfio_migration_finalize(&vdev->vbasedev);
}

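/*
 * Reset ordering below: a device-specific reset quirk (vdev->resetfn) is
 * tried first, then a device-level VFIO_DEVICE_RESET when FLR is usable,
 * then our own hot reset of the bus, and finally a PM reset as a last
 * resort.
 */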
static void vfio_pci_reset(DeviceState *dev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(dev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->display != ON_OFF_AUTO_OFF) {
        vfio_display_reset(vdev);
    }

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}

static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = VFIO_PCI(obj);

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev);
    vdev->host.domain = ~0U;
    vdev->host.bus = ~0U;
    vdev->host.slot = ~0U;
    vdev->host.function = ~0U;

    vdev->nv_gpudirect_clique = 0xFF;

    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices */
    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.ram_block_discard_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTOPCIBAR_OFF),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

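/*
 * Typical usage (illustrative command lines; the host address is an example):
 *
 *   -device vfio-pci,host=0000:01:00.0
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:01:00.0,x-no-mmap=on
 *
 * The "x-" prefixed properties above are primarily experimental debug and
 * workaround knobs and are not guaranteed to remain stable.
 */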
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    device_class_set_props(dc, vfio_pci_dev_properties);
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->realize = vfio_realize;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
}

static const TypeInfo vfio_pci_dev_info = {
    .name = TYPE_VFIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    },
};

static Property vfio_pci_dev_nohotplug_properties[] = {
    DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
    dc->hotpluggable = false;
}

static const TypeInfo vfio_pci_nohotplug_dev_info = {
    .name = TYPE_VFIO_PCI_NOHOTPLUG,
    .parent = TYPE_VFIO_PCI,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_nohotplug_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
    type_register_static(&vfio_pci_nohotplug_dev_info);
}

type_init(register_vfio_pci_dev_type)