/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"

#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */

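/*
 * Worked example, assuming the default x-intx-mmap-timeout-ms of 1100:
 * an interrupt disables mmaps and arms the timer; if the guest EOIs and
 * then stays quiet for 1100 ms, the callback below re-enables mmaps,
 * while a device interrupting more often than that stays in trap mode.
 */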
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

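/*
 * Userspace INTx handler, attached to the VFIO interrupt eventfd: assert
 * the virtual IRQ, drop to trapped (non-mmap) BAR access per the policy
 * above, and arm the timer that eventually re-enables mmaps.
 */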
static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

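/*
 * KVM-accelerated INTx bypass: the trigger eventfd is handed to KVM as an
 * irqfd so the host interrupt is injected without exiting to QEMU, and the
 * unmask eventfd is registered as the resamplefd so the guest's EOI
 * re-enables the host interrupt, also without a userspace round trip.
 */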
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &vdev->intx.interrupt,
                                           &vdev->intx.unmask,
                                           vdev->intx.route.irq)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_UNMASK,
                               event_notifier_get_fd(&vdev->intx.unmask),
                               errp)) {
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                          vdev->intx.route.irq);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                              vdev->intx.route.irq)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                        vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
{
    Error *err = NULL;

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route->irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = *route;

    if (route->mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static void vfio_intx_routing_notifier(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (pci_intx_route_changed(&vdev->intx.route, &route)) {
        vfio_intx_update(vdev, &route);
    }
}

static void vfio_irqchip_change(Notifier *notify, void *data)
{
    VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
                                       irqchip_change_notifier);

    vfio_intx_update(vdev, &vdev->intx.route);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    Error *err = NULL;
    int32_t fd;
    int ret;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }
    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

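/*
 * Program all vector trigger eventfds in a single VFIO_DEVICE_SET_IRQS
 * call.  The vfio_irq_set below is variable-length, carrying one fd per
 * vector; per the vfio uAPI, an fd of -1 leaves a vector de-assigned.
 */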
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int32_t fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

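/*
 * Route a vector through the KVM irqchip: allocate an MSI route for the
 * guest-visible message and attach the kvm_interrupt eventfd to it as an
 * irqfd, taking QEMU out of the signaling path.  On any failure we simply
 * fall back to handling the vector in userspace via vector->interrupt.
 */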
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        Error *err = NULL;
        int32_t fd;

        if (vector->virq >= 0) {
            fd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            fd = event_notifier_get_fd(&vector->interrupt);
        }

        if (vfio_set_irq_signaling(&vdev->vbasedev,
                                   VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int32_t fd = event_notifier_get_fd(&vector->interrupt);
        Error *err = NULL;

        if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);
        vdev->msi_vectors = NULL;

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

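/*
 * Device ROM handling: rather than mapping the ROM, it is read once from
 * the VFIO ROM region into a buffer on first guest access and served to
 * the guest from that copy.
 */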
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

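/* The guest's view of the ROM BAR is read-only; writes are discarded. */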
static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified romfile\n");
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified"
                         " non zero value for rombar\n");
        } else {
            warn_report("Rom loading for device at %s has been disabled"
                        " due to system instability issues",
                        vdev->vbasedev.name);
            error_printf("Specify rombar=1 or romfile to force\n");
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand memory region of sub-page (size < PAGE_SIZE) MMIO BAR to page
 * size if the BAR is in an exclusive page in host so that we could map
 * this BAR to guest. But this sub-page BAR may not occupy an exclusive
 * page in guest. So we should set the priority of the expanded memory
 * region to zero in case of overlap with BARs which share the same page
 * with the sub-page BAR in guest. Besides, we should also recover the
 * size of this sub-page BAR when its base address is changed in guest
 * and not page aligned any more.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *region_mr, *base_mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    base_mr = vdev->bars[bar].mr;
    region_mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask)) {
        size = qemu_real_host_page_size;
    }

    memory_region_transaction_begin();

    if (vdev->bars[bar].size < size) {
        memory_region_set_size(base_mr, size);
    }
    memory_region_set_size(region_mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
        memory_region_del_subregion(r->address_space, base_mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, base_mr, 0);
    }

    memory_region_transaction_commit();
}

/*
 * PCI config space
 */
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

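/*
 * Config writes reach the device first; QEMU then mirrors the write into
 * its emulated copy and reacts to the side effects it tracks: MSI/MSI-X
 * enable transitions and BAR or COMMAND updates that may require
 * remapping sub-page BARs.
 */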
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
                != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
        range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                vdev->bars[bar].region.size > 0 &&
                vdev->bars[bar].region.size < qemu_real_host_page_size) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_propagate_prepend(errp, err, "msi_init failed: ");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

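/*
 * The host kernel historically refuses to mmap the page(s) of a BAR that
 * contain the MSI-X table (unless it reports MSIX_MAPPABLE), so trim or
 * split the region's mmap(s) around the table and let QEMU trap that range.
 */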
static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * If the host driver allows mapping of a MSIX data, we are going to
     * do map the entire BAR and emulate MSIX table on top of that.
     */
    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
        return;
    }

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
{
    int target_bar = -1;
    size_t msix_sz;

    if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
        return;
    }

    /* The actual minimum size of MSI-X structures */
    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
    /* Round up to host pages, we don't want to share a page */
    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
    /* PCI BARs must be a power of 2 */
    msix_sz = pow2ceil(msix_sz);

    if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
        /*
         * TODO: Lookup table for known devices.
         *
         * Logically we might use an algorithm here to select the BAR adding
         * the least additional MMIO space, but we cannot programmatically
         * predict the driver dependency on BAR ordering or sizing, therefore
         * 'auto' becomes a lookup for combinations reported to work.
         */
        if (target_bar < 0) {
            error_setg(errp, "No automatic MSI-X relocation available for "
                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
            return;
        }
    } else {
        target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
    }

    /* I/O port BARs cannot host MSI-X structures */
    if (vdev->bars[target_bar].ioport) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "I/O port BAR", target_bar);
        return;
    }

    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
    if (!vdev->bars[target_bar].size &&
         target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
        return;
    }

    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
    if (vdev->bars[target_bar].size > 1 * GiB &&
        !vdev->bars[target_bar].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "no space to extend 32-bit BAR", target_bar);
        return;
    }

    /*
     * If adding a new BAR, test if we can make it 64bit.  We make it
     * prefetchable since QEMU MSI-X emulation has no read side effects
     * and doing so makes mapping more flexible.
     */
    if (!vdev->bars[target_bar].size) {
        if (target_bar < (PCI_ROM_SLOT - 1) &&
            !vdev->bars[target_bar + 1].size) {
            vdev->bars[target_bar].mem64 = true;
            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
        }
        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
        vdev->bars[target_bar].size = msix_sz;
        vdev->msix->table_offset = 0;
    } else {
        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
                                          msix_sz * 2);
        /*
         * Due to above size calc, MSI-X always starts halfway into the BAR,
         * which will always be a separate host page.
         */
        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
    }

    vdev->msix->table_bar = target_bar;
    vdev->msix->pba_bar = target_bar;
    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
    vdev->msix->pba_offset = vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);

    trace_vfio_msix_relo(vdev->vbasedev.name,
                         vdev->msix->table_bar, vdev->msix->table_offset);
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    vfio_pci_relocate_msix(vdev, errp);
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].mr,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    /*
     * The emulated machine may provide a paravirt interface for MSIX setup
     * so it is not strictly necessary to emulate MSIX here. This becomes
     * helpful when frequently accessed MMIO registers are located in
     * subpages adjacent to the MSIX table but the MSIX data containing page
     * cannot be mapped because of a host page size bigger than the MSIX table
     * alignment.
     */
    if (object_property_get_bool(OBJECT(qdev_get_machine()),
                                 "vfio-no-msix-emulation", NULL)) {
        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
    }

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->bars[vdev->msix->pba_bar].mr);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resources setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    uint32_t pci_bar;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                         ~PCI_BASE_ADDRESS_MEM_MASK);
    bar->size = bar->region.size;
}

static void vfio_bars_prepare(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_prepare(vdev, i);
    }
}

static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    char *name;

    if (!bar->size) {
        return;
    }

    bar->mr = g_new0(MemoryRegion, 1);
    name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
    memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
    g_free(name);

    if (bar->region.size) {
        memory_region_add_subregion(bar->mr, 0, bar->region.mem);

        if (vfio_region_mmap(&bar->region)) {
            error_report("Failed to mmap %s BAR %d. Performance may be slow",
                         vdev->vbasedev.name, nr);
        }
    }

    pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
}

static void vfio_bars_register(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_register(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&bar->region);
        if (bar->region.size) {
            memory_region_del_subregion(bar->mr, bar->region.mem);
        }
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&bar->region);
        if (bar->size) {
            object_unparent(OBJECT(bar->mr));
            g_free(bar->mr);
        }
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
                               Error **errp)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_setg(errp, "assignment of PCIe type 0x%x "
                   "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
        PCIBus *bus = pci_get_bus(&vdev->pdev);
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = pci_get_bus(bridge);
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                           QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                           QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }
    }

    /*
     * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
     * (Niantic errata #35) causing Windows to error with a Code 10 for the
     * device on Q35.  Fixup any such devices to report version 1.  If we
     * were to remove the capability entirely the guest would lose extended
     * config space.
     */
    if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
        vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                               1, PCI_EXP_FLAGS_VERS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
                             errp);
    if (pos < 0) {
        return pos;
    }

    vdev->pdev.exp.exp_cap = pos;

    return pos;
}

static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next, errp);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;

        ret = vfio_add_virt_caps(vdev, errp);
        if (ret) {
            return ret;
        }
    }

    /* Scale down size, esp in case virt caps were added above */
    size = MIN(size, vfio_std_cap_max_size(pdev, pos));

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    }

    if (ret < 0) {
        error_prepend(errp,
                      "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
                      cap_id, size, pos);
        return ret;
    }

    return 0;
}

static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer.  Seed the head of the chain here such that
     * we can simply skip any capabilities we want to drop below, regardless
     * of their position in the chain.  If this stub capability still exists
     * after we add the capabilities we want to expose, update the capability
     * ID to zero.  Note that we cannot seed with the capability header being
     * zero as this conflicts with the definition of an absent capability
     * chain and prevents capabilities beyond the head of the list from being
     * added.  By replacing the dummy capability ID with zero after walking
     * the device chain, we also transparently mark extended capabilities as
     * absent if no capabilities were added.  Note that the PCIe spec defines
     * an absence of extended capabilities to be determined by a value of
     * zero for the capability ID, version, AND next pointer.  A non-zero
     * next pointer should be sufficient to indicate additional capabilities
     * are present, which will occur if we call pcie_add_capability() below.
     * The entire first dword is emulated to support this.
     *
     * NB. The kernel side does similar masking, so be prepared that our
     * view of the device may also contain a capability ID zero in the head
     * of the chain.  Skip it for the same reason that we cannot seed the
     * chain with a zero capability.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize.  Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case 0: /* kernel masked capability */
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
        case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }
    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
}
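/*
 * For reference, the stub header seeded above, PCI_EXT_CAP(0xFFFF, 0, 0),
 * packs the extended capability dword at offset 0x100 as (illustrative
 * layout, matching the PCIe extended capability header):
 *
 *   bits  0-15: capability ID  = 0xFFFF (dummy, rewritten to 0 if unused)
 *   bits 16-19: version        = 0
 *   bits 20-31: next pointer   = 0 (filled in by pcie_add_capability())
 *
 * A guest walking the chain therefore sees either a valid chain rooted at
 * 0x100 or an all-zero header meaning "no extended capabilities".
 */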
static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
    if (ret) {
        return ret;
    }

    vfio_add_ext_cap(vdev);
    return 0;
}
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in a known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int nr;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
        uint32_t val = 0;
        uint32_t len = sizeof(val);

        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
            error_report("%s(%s) reset bar %d failed: %m", __func__,
                         vdev->vbasedev.name, nr);
        }
    }

    vfio_quirk_reset(vdev);
}
static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}
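/*
 * Example: a dependent device at host address 0000:06:0d.0 produces the
 * string "0000:06:0d.0" from the format above, which matches the
 * vbasedev.name that vfio_realize() derives from the sysfs path with
 * g_path_get_basename().
 */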
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}
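/*
 * Note the two-call pattern above for VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
 * the first ioctl is issued with only the fixed-size header, the kernel
 * answers ENOSPC while filling in info->count, and the second call passes
 * a buffer sized for the full dependent-device array.  A minimal sketch of
 * the same idiom for a variable-length vfio info ioctl:
 *
 *   info->argsz = sizeof(*info);              // no room for array entries
 *   ioctl(fd, ..., info);                     // ENOSPC, but count is set
 *   info = g_realloc(info, sizeof(*info) + count * sizeof(entry));
 *   info->argsz = sizeof(*info) + count * sizeof(entry);
 *   ioctl(fd, ..., info);                     // now returns the full array
 */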
/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use device case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}
static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}
static Object *vfio_pci_get_object(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return OBJECT(vdev);
}
static bool vfio_msix_present(void *opaque, int version_id)
{
    PCIDevice *pdev = opaque;

    return msix_present(pdev);
}
const VMStateDescription vmstate_vfio_pci_config = {
    .name = "VFIOPCIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice),
        VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present),
        VMSTATE_END_OF_LIST()
    }
};
static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL);
}
static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
    if (ret) {
        return ret;
    }

    vfio_pci_write_config(pdev, PCI_COMMAND,
                          pci_get_word(pdev->config + PCI_COMMAND), 2);

    if (msi_enabled(pdev)) {
        vfio_msi_enable(vdev);
    } else if (msix_enabled(pdev)) {
        vfio_msix_enable(vdev);
    }

    return ret;
}
static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
    .vfio_get_object = vfio_pci_get_object,
    .vfio_save_config = vfio_pci_save_config,
    .vfio_load_config = vfio_pci_load_config,
};
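/*
 * These callbacks are invoked through the generic VFIODevice layer, which
 * only holds a VFIODevice pointer.  A minimal sketch of the dispatch,
 * assuming a caller that has a VFIODevice *vbasedev with ops wired up:
 *
 *   if (vbasedev->ops->vfio_eoi) {
 *       vbasedev->ops->vfio_eoi(vbasedev);   // lands in vfio_intx_eoi()
 *   }
 *
 * Each implementation then recovers the PCI device with
 * container_of(vbasedev, VFIOPCIDevice, vbasedev).
 */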
int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    int ret;

    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "failed getting region info for VGA region index %d",
                         VFIO_PCI_VGA_REGION_INDEX);
        return ret;
    }

    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
        reg_info->size < 0xbffff + 1) {
        error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
                   (unsigned long)reg_info->flags,
                   (unsigned long)reg_info->size);
        g_free(reg_info);
        return -EINVAL;
    }

    vdev->vga = g_new0(VFIOVGA, 1);

    vdev->vga->fd_offset = reg_info->offset;
    vdev->vga->fd = vdev->vbasedev.fd;

    g_free(reg_info);

    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
                          "vfio-vga-mmio@0xa0000",
                          QEMU_PCI_VGA_MEM_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                          "vfio-vga-io@0x3b0",
                          QEMU_PCI_VGA_IO_LO_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                          "vfio-vga-io@0x3c0",
                          QEMU_PCI_VGA_IO_HI_SIZE);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);

    return 0;
}
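/*
 * The three VGA windows wired up above cover the classic legacy ranges
 * (offsets within the VFIO VGA region follow the QEMU_PCI_VGA_* constants):
 *
 *   QEMU_PCI_VGA_MEM     guest memory 0xa0000..0xbffff  "vfio-vga-mmio@0xa0000"
 *   QEMU_PCI_VGA_IO_LO   guest i/o ports from 0x3b0     "vfio-vga-io@0x3b0"
 *   QEMU_PCI_VGA_IO_HI   guest i/o ports from 0x3c0     "vfio-vga-io@0x3c0"
 *
 * All three are backed by the same region fd/offset pair saved in vdev->vga,
 * with vfio_vga_ops trapping every access.
 */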
static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_setg(errp, "this isn't a PCI device");
        return;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "unexpected number of io regions %u",
                   vbasedev->num_regions);
        return;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
        return;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            return;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to get config info");
        return;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    g_free(reg_info);

    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
        ret = vfio_populate_vga(vdev, errp);
        if (ret) {
            error_append_hint(errp, "device does not support "
                              "requested feature x-vga\n");
            return;
        }
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        warn_report(VFIO_MSG_PREFIX
                    "Could not enable error recovery for the device",
                    vbasedev->name);
    }
}
static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);

    vfio_put_base_device(&vdev->vbasedev);
}
static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected. Please collect any data"
                 " possible and then kill the guest", __func__,
                 vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}
/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int32_t fd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    fd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
}
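/*
 * The wiring above follows the usual vfio eventfd pattern: the
 * EventNotifier wraps an eventfd, qemu_set_fd_handler() polls its read
 * side in the main loop, and vfio_set_irq_signaling() hands the write
 * side to the kernel as the trigger for VFIO_PCI_ERR_IRQ_INDEX, so a host
 * error event surfaces as a call to vfio_err_notifier_handler().
 */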
static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->pci_aer) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}
static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(DEVICE(vdev), &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}
static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    Error *err = NULL;
    int32_t fd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    fd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }
}
static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->req_enabled) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}
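/*
 * Passing an fd of -1 to vfio_set_irq_signaling() in the two unregister
 * paths above tears down the kernel-side trigger rather than installing
 * one, after which the fd handler and notifier can be safely removed.
 */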
static void vfio_realize(PCIDevice *pdev, Error **errp)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, *subsys, group_path[PATH_MAX], *group_name;
    Error *err = NULL;
    ssize_t len;
    struct stat st;
    int groupid;
    int i, ret;
    bool is_mdev;

    if (!vdev->vbasedev.sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }

    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno, "no such host device");
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev);
        return;
    }

    vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.dev = DEVICE(vdev);

    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
                         "no iommu_group found");
        goto error;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        goto error;
    }

    trace_vfio_realize(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
    if (!group) {
        goto error;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            goto error;
        }
    }
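    /*
     * Example of the group lookup above, assuming a device bound to
     * vfio-pci at host address 0000:06:0d.0 whose IOMMU group is 26:
     *
     *   sysfsdev:  /sys/bus/pci/devices/0000:06:0d.0
     *   readlink of sysfsdev/iommu_group yields a path ending in ".../26",
     *   so basename() + sscanf() produce groupid = 26,
     *   and vfio_get_group() opens the matching /dev/vfio/26.
     */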
    /*
     * Mediated devices *might* operate compatibly with discarding of RAM, but
     * we cannot know for certain, it depends on whether the mdev vendor driver
     * stays in sync with the active working set of the guest driver.  Prevent
     * the x-balloon-allowed option unless this is minimally an mdev device.
     */
    tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
    subsys = realpath(tmp, NULL);
    g_free(tmp);
    is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
    free(subsys);

    trace_vfio_mdev(vdev->vbasedev.name, is_mdev);

    if (vdev->vbasedev.ram_block_discard_allowed && !is_mdev) {
        error_setg(errp, "x-balloon-allowed only potentially compatible "
                   "with mdev devices");
        vfio_put_group(group);
        goto error;
    }

    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        goto error;
    }

    vfio_populate_device(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }
    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_setg_errno(errp, -ret, "failed to read device config space");
        goto error;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
    /* QEMU can also add or extend BARs */
    memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }
    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    vfio_bars_prepare(vdev);

    vfio_msix_early_setup(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    vfio_bars_register(vdev);

    ret = vfio_add_capabilities(vdev, errp);
    if (ret) {
        goto out_teardown;
    }

    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }

    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        struct vfio_region_info *opregion;

        if (vdev->pdev.qdev.hotplugged) {
            error_setg(errp,
                       "cannot support IGD OpRegion feature on hotplugged "
                       "device");
            goto out_teardown;
        }

        ret = vfio_get_dev_region_info(&vdev->vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "does not support requested IGD OpRegion feature");
            goto out_teardown;
        }

        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
        g_free(opregion);
        if (ret) {
            goto out_teardown;
        }
    }
    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev,
                                             vfio_intx_routing_notifier);
        vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
        kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
        ret = vfio_intx_enable(vdev, errp);
        if (ret) {
            goto out_deregister;
        }
    }

    if (vdev->display != ON_OFF_AUTO_OFF) {
        ret = vfio_display_probe(vdev, errp);
        if (ret) {
            goto out_deregister;
        }
    }
    if (vdev->enable_ramfb && vdev->dpy == NULL) {
        error_setg(errp, "ramfb=on requires display=on");
        goto out_deregister;
    }
    if (vdev->display_xres || vdev->display_yres) {
        if (vdev->dpy == NULL) {
            error_setg(errp, "xres and yres properties require display=on");
            goto out_deregister;
        }
        if (vdev->dpy->edid_regs == NULL) {
            error_setg(errp, "xres and yres properties need edid support");
            goto out_deregister;
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
        ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVIDIA V100 GPU RAM");
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
        ret = vfio_pci_nvlink2_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVlink2 bridge");
        }
    }

    if (!pdev->failover_pair_id) {
        ret = vfio_migration_probe(&vdev->vbasedev, errp);
        if (ret) {
            error_report("%s: Migration disabled", vdev->vbasedev.name);
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return;

out_deregister:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
out_teardown:
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
error:
    error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
}
static void vfio_instance_finalize(Object *obj)
{
    VFIOPCIDevice *vdev = VFIO_PCI(obj);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_display_finalize(vdev);
    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    /*
     * XXX Leaking igd_opregion is not an oversight, we can't remove the
     * fw_cfg entry therefore leaking this allocation seems like the safest
     * option.
     *
     * g_free(vdev->igd_opregion);
     */
    vfio_put_device(vdev);
    vfio_put_group(group);
}
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    if (vdev->irqchip_change_notifier.notify) {
        kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
    }
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
    vfio_migration_finalize(&vdev->vbasedev);
}
static void vfio_pci_reset(DeviceState *dev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(dev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->display != ON_OFF_AUTO_OFF) {
        vfio_display_reset(vdev);
    }

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}
static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = VFIO_PCI(obj);

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev);
    vdev->host.domain = ~0U;
    vdev->host.bus = ~0U;
    vdev->host.slot = ~0U;
    vdev->host.function = ~0U;

    vdev->nv_gpudirect_clique = 0xFF;

    /*
     * QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices.
     */
    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
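/*
 * The ~0U host address fields initialized above act as a sentinel:
 * vfio_realize() only synthesizes a sysfsdev path when at least one of
 * domain/bus/slot/function was overwritten by the "host" property, i.e.
 * when ~vdev->host.domain etc. is non-zero.
 */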
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.pre_copy_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BOOL("x-enable-migration", VFIOPCIDevice,
                     vbasedev.enable_migration, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.ram_block_discard_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTOPCIBAR_OFF),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
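/*
 * Typical usage (illustrative command lines; the device address is a
 * placeholder, matching the hint printed by vfio_realize()):
 *
 *   -device vfio-pci,host=0000:06:0d.0
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:06:0d.0,x-no-mmap=on
 *
 * Properties with an "x-" prefix are experimental tuning/debug knobs and
 * carry no stability guarantee.
 */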
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    device_class_set_props(dc, vfio_pci_dev_properties);
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->realize = vfio_realize;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
}
static const TypeInfo vfio_pci_dev_info = {
    .name = TYPE_VFIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    },
};
static Property vfio_pci_dev_nohotplug_properties[] = {
    DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
    DEFINE_PROP_END_OF_LIST(),
};
static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
    dc->hotpluggable = false;
}
static const TypeInfo vfio_pci_nohotplug_dev_info = {
    .name = TYPE_VFIO_PCI_NOHOTPLUG,
    .parent = TYPE_VFIO_PCI,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_nohotplug_dev_class_init,
};
static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
    type_register_static(&vfio_pci_nohotplug_dev_info);
}

type_init(register_vfio_pci_dev_type)