/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"
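
/* The MSI-X capability structure occupies a fixed 12 bytes of config space */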
#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}
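
/*
 * INTx is level triggered: the host kernel keeps the physical interrupt
 * masked after it fires.  On guest EOI we deassert the virtual IRQ and
 * unmask the device so it can trigger again.
 */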
static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}
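
/*
 * When KVM supports irqfd with resample, INTx can bypass QEMU entirely:
 * KVM injects the interrupt directly from VFIO's eventfd and signals a
 * resamplefd on guest EOI, which VFIO uses to unmask the device.
 */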
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}
static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}
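
/*
 * Called via the PCI INTx routing notifier when the guest reprograms
 * interrupt routing: tear down the KVM bypass for the old route and
 * rebuild it for the new one.
 */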
static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}
static int vfio_intx_enable(VFIOPCIDevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        /* Re-fetch the fd; pfd pointed into irq_set, which was freed above */
        qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                            NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}
static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}
/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}
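
/*
 * VFIO_DEVICE_SET_IRQS takes a variable-length struct vfio_irq_set with
 * the payload, here an array of eventfds, appended after the header.
 * Per the VFIO UAPI, an fd of -1 leaves that vector without a trigger.
 */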
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when set up.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
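
/*
 * Route an MSI vector through the KVM irqchip so interrupts can be
 * injected without bouncing through QEMU.  On any failure we quietly
 * fall back to userspace signaling (vector->virq stays -1).
 */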
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}
static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}
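
/*
 * MSI-X vector-use notifier, called by the MSI-X core when the guest
 * enables or updates a vector.  Sets up eventfd signaling and the KVM
 * route, growing the number of vectors enabled on the host as needed.
 */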
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        vfio_add_kvm_msi_virq(vdev, vector, nr, true);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}
static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}
static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}
static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}
static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev);
}
static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}
static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}
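
/*
 * Read the device option ROM through the VFIO ROM region into a local
 * buffer.  This is done lazily, on the guest's first read of the ROM
 * BAR (see vfio_rom_read() below).
 */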
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}
static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}
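
/* The ROM BAR is read-only; guest writes are silently discarded. */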
static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning: Device at %s is known to cause system "
                         "instability issues during option ROM execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->vbasedev.name);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning: Device at %s is known to cause system "
                         "instability issues during option ROM execution. "
                         "Proceeding anyway since user specified a non-zero "
                         "value for rombar\n",
                         vdev->vbasedev.name);
        } else {
            error_printf("Warning: ROM loading for device at %s has been "
                         "disabled due to system instability issues. "
                         "Specify rombar=1 or romfile to force\n",
                         vdev->vbasedev.name);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}
void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}
uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * PCI config space
 */
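/*
 * Config space reads merge two sources bit by bit: emulated_config_bits
 * marks the bits QEMU supplies from its emulated config space; all other
 * bits are read straight from the physical device via the VFIO fd.
 */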
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
        != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}
static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_prepend(&err, "vfio: msi_init failed: ");
        error_report_err(err);
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Check whether the PBA offset extends outside of the specified BAR.
     * If it does, we need to apply a hardware specific quirk if the device
     * is known, or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters.  The T5 hardware returns an incorrect value of 0x8000
         * for the VF PBA offset while the BAR itself is only 8k.  The
         * correct value is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else {
            error_report("vfio: Hardware reports invalid configuration, "
                         "MSIX PBA outside of specified BAR");
            g_free(msix);
            return -EINVAL;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    return 0;
}
static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
{
    int ret;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    return 0;
}
static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->bars[vdev->msix->pba_bar].region.mem);
        g_free(vdev->msix->pending);
    }
}
/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}
static void vfio_bar_setup(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    if (vfio_region_mmap(&bar->region)) {
        error_report("Failed to mmap %s BAR %d. Performance may be slow",
                     vdev->vbasedev.name, nr);
    }

    pci_register_bar(&vdev->pdev, nr, type, bar->region.mem);
}
static void vfio_bars_setup(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_setup(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}
/*
 * General setup
 */
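/*
 * Capability sizes aren't encoded in config space; infer each one as the
 * distance from its offset to the next capability in the chain, or to the
 * end of config space for the last one.
 */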
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        PCIBus *bus = vdev->pdev.bus;
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = bridge->bus;
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}
static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}
static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %s Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->vbasedev.name,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}
static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return 0;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer.  For the head of the chain, we can modify the
     * capability ID to something that cannot match a valid capability.  ID
     * 0 is reserved for this since absence of capabilities is indicated by
     * 0 for the ID, version, AND next pointer.  However, pcie_add_capability()
     * uses ID 0 as reserved for list management and will incorrectly match and
     * assert if we attempt to pre-load the head of the chain with this ID.
     * Use ID 0xFFFF temporarily since it also seems to be reserved in part
     * for identifying absence of capabilities in a root complex register
     * block.  If the ID still exists after adding capabilities, switch back to
     * zero.  We'll mark this entire first dword as emulated for this purpose.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize. Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }
    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
    return 0;
}
static int vfio_add_capabilities(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
    if (ret) {
        return ret;
    }

    return vfio_add_ext_cap(vdev);
}
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    vfio_intx_enable(vdev);
}

static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}
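
/*
 * A PCI hot reset affects every device on the bus, so before asking the
 * kernel to perform one we must verify that we own (have group fds for)
 * every affected group and quiesce every affected device.
 */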
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    vfio_pci_pre_reset(vdev);
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    vfio_pci_post_reset(vdev);
    g_free(info);

    return ret;
}
2096 * We want to differentiate hot reset of multiple in-use devices vs hot reset
2097 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
2098 * of doing hot resets when there is only a single device per bus. The in-use
2099 * here refers to how many VFIODevices are affected. A hot reset that affects
2100 * multiple devices, but only a single in-use device, means that we can call
2101 * it from our bus ->reset() callback since the extent is effectively a single
2102 * device. This allows us to make use of it in the hotplug path. When there
2103 * are multiple in-use devices, we can only trigger the hot reset during a
2104 * system reset and thus from our reset handler. We separate _one vs _multi
2105 * here so that we don't overlap and do a double reset on the system reset
2106 * path where both our reset handler and ->reset() callback are used. Calling
2107 * _one() will only do a hot reset for the single in-use device case, while
2108 * calling _multi() will do nothing if a _one() would have been sufficient.
2110 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
2112 return vfio_pci_hot_reset(vdev, true);
2115 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
2117 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2118 return vfio_pci_hot_reset(vdev, false);
2121 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2123 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
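/*
 * Flag the device for a deferred hot reset if the kernel cannot reset
 * it directly, or if the only per-device mechanism is a PM reset (no
 * FLR), which may be weaker than a bus reset.
 */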
2124 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2125 vbasedev->needs_reset = true;
2129 static VFIODeviceOps vfio_pci_ops = {
2130 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2131 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
2132 .vfio_eoi = vfio_intx_eoi,
2135 int vfio_populate_vga(VFIOPCIDevice *vdev)
2137 VFIODevice *vbasedev = &vdev->vbasedev;
2138 struct vfio_region_info *reg_info;
2139 int ret;
2141 ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2142 if (ret) {
2143 return ret;
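/*
 * The VGA region is addressed by legacy offset, so it must be both
 * readable and writable and span at least the MMIO range ending at
 * 0xbffff.
 */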
2146 if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2147 !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2148 reg_info->size < 0xbffff + 1) {
2149 error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
2150 (unsigned long)reg_info->flags,
2151 (unsigned long)reg_info->size);
2152 g_free(reg_info);
2153 return -EINVAL;
2156 vdev->vga = g_new0(VFIOVGA, 1);
2158 vdev->vga->fd_offset = reg_info->offset;
2159 vdev->vga->fd = vdev->vbasedev.fd;
2161 g_free(reg_info);
2163 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2164 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2165 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
2167 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2168 OBJECT(vdev), &vfio_vga_ops,
2169 &vdev->vga->region[QEMU_PCI_VGA_MEM],
2170 "vfio-vga-mmio@0xa0000",
2171 QEMU_PCI_VGA_MEM_SIZE);
2173 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2174 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2175 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
2177 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2178 OBJECT(vdev), &vfio_vga_ops,
2179 &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2180 "vfio-vga-io@0x3b0",
2181 QEMU_PCI_VGA_IO_LO_SIZE);
2183 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2184 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2185 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
2187 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2188 OBJECT(vdev), &vfio_vga_ops,
2189 &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2190 "vfio-vga-io@0x3c0",
2191 QEMU_PCI_VGA_IO_HI_SIZE);
2193 pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2194 &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2195 &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2197 return 0;
2200 static int vfio_populate_device(VFIOPCIDevice *vdev)
2202 VFIODevice *vbasedev = &vdev->vbasedev;
2203 struct vfio_region_info *reg_info;
2204 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
2205 int i, ret = -1;
2207 /* Sanity check device */
2208 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2209 error_report("vfio: Um, this isn't a PCI device");
2210 goto error;
2213 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2214 error_report("vfio: unexpected number of io regions %u",
2215 vbasedev->num_regions);
2216 goto error;
2219 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2220 error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
2221 goto error;
2224 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2225 char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2227 ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2228 &vdev->bars[i].region, i, name);
2229 g_free(name);
2231 if (ret) {
2232 error_report("vfio: Error getting region %d info: %m", i);
2233 goto error;
2236 QLIST_INIT(&vdev->bars[i].quirks);
2239 ret = vfio_get_region_info(vbasedev,
2240 VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
2241 if (ret) {
2242 error_report("vfio: Error getting config info: %m");
2243 goto error;
2246 trace_vfio_populate_device_config(vdev->vbasedev.name,
2247 (unsigned long)reg_info->size,
2248 (unsigned long)reg_info->offset,
2249 (unsigned long)reg_info->flags);
2251 vdev->config_size = reg_info->size;
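/*
 * A config space of only 256 bytes means a conventional PCI device,
 * which must not be exposed to the guest as PCI Express.
 */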
2252 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2253 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2255 vdev->config_offset = reg_info->offset;
2257 g_free(reg_info);
2259 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2260 ret = vfio_populate_vga(vdev);
2261 if (ret) {
2262 error_report(
2263 "vfio: Device does not support requested feature x-vga");
2264 goto error;
2268 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2270 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
2271 if (ret) {
2272 /* This can fail for an old kernel or legacy PCI dev */
2273 trace_vfio_populate_device_get_irq_info_failure();
2274 ret = 0;
2275 } else if (irq_info.count == 1) {
2276 vdev->pci_aer = true;
2277 } else {
2278 error_report("vfio: %s "
2279 "Could not enable error recovery for the device",
2280 vbasedev->name);
2283 error:
2284 return ret;
2287 static void vfio_put_device(VFIOPCIDevice *vdev)
2289 g_free(vdev->vbasedev.name);
2290 g_free(vdev->msix);
2292 vfio_put_base_device(&vdev->vbasedev);
2295 static void vfio_err_notifier_handler(void *opaque)
2297 VFIOPCIDevice *vdev = opaque;
2299 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2300 return;
2304 * TBD. Retrieve the error details and decide what action
2305 * needs to be taken. One of the actions could be to pass
2306 * the error to the guest and have the guest driver recover
2307 * from the error. This requires that PCIe capabilities be
2308 * exposed to the guest. For now, we just terminate the
2309 * guest to contain the error.
2312 error_report("%s(%s) Unrecoverable error detected. Please collect any data "
"possible and then kill the guest", __func__, vdev->vbasedev.name);
2314 vm_stop(RUN_STATE_INTERNAL_ERROR);
2318 * Registers error notifier for devices supporting error recovery.
2319 * If we encounter a failure in this function, we report an error
2320 * and continue after disabling error recovery support for the
2321 * device.
2323 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2325 int ret;
2326 int argsz;
2327 struct vfio_irq_set *irq_set;
2328 int32_t *pfd;
2330 if (!vdev->pci_aer) {
2331 return;
2334 if (event_notifier_init(&vdev->err_notifier, 0)) {
2335 error_report("vfio: Unable to init event notifier for error detection");
2336 vdev->pci_aer = false;
2337 return;
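/*
 * struct vfio_irq_set is variable-length; the eventfd to be triggered
 * on error is passed in the data[] tail, so size the buffer for the
 * header plus one int32_t file descriptor.
 */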
2340 argsz = sizeof(*irq_set) + sizeof(*pfd);
2342 irq_set = g_malloc0(argsz);
2343 irq_set->argsz = argsz;
2344 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2345 VFIO_IRQ_SET_ACTION_TRIGGER;
2346 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2347 irq_set->start = 0;
2348 irq_set->count = 1;
2349 pfd = (int32_t *)&irq_set->data;
2351 *pfd = event_notifier_get_fd(&vdev->err_notifier);
2352 qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
2354 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2355 if (ret) {
2356 error_report("vfio: Failed to set up error notification");
2357 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2358 event_notifier_cleanup(&vdev->err_notifier);
2359 vdev->pci_aer = false;
2361 g_free(irq_set);
2364 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2366 int argsz;
2367 struct vfio_irq_set *irq_set;
2368 int32_t *pfd;
2369 int ret;
2371 if (!vdev->pci_aer) {
2372 return;
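/* Writing an fd of -1 tears down the error eventfd in the kernel */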
2375 argsz = sizeof(*irq_set) + sizeof(*pfd);
2377 irq_set = g_malloc0(argsz);
2378 irq_set->argsz = argsz;
2379 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2380 VFIO_IRQ_SET_ACTION_TRIGGER;
2381 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2382 irq_set->start = 0;
2383 irq_set->count = 1;
2384 pfd = (int32_t *)&irq_set->data;
2385 *pfd = -1;
2387 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2388 if (ret) {
2389 error_report("vfio: Failed to de-assign error fd: %m");
2391 g_free(irq_set);
2392 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2393 NULL, NULL, vdev);
2394 event_notifier_cleanup(&vdev->err_notifier);
2397 static void vfio_req_notifier_handler(void *opaque)
2399 VFIOPCIDevice *vdev = opaque;
2401 if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2402 return;
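/* The host has requested the device back; honor it by unplugging */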
2405 qdev_unplug(&vdev->pdev.qdev, NULL);
2408 static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2410 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2411 .index = VFIO_PCI_REQ_IRQ_INDEX };
2412 int argsz;
2413 struct vfio_irq_set *irq_set;
2414 int32_t *pfd;
2416 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2417 return;
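/* Probe for the device-request IRQ; older kernels don't expose it */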
2420 if (ioctl(vdev->vbasedev.fd,
2421 VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2422 return;
2425 if (event_notifier_init(&vdev->req_notifier, 0)) {
2426 error_report("vfio: Unable to init event notifier for device request");
2427 return;
2430 argsz = sizeof(*irq_set) + sizeof(*pfd);
2432 irq_set = g_malloc0(argsz);
2433 irq_set->argsz = argsz;
2434 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2435 VFIO_IRQ_SET_ACTION_TRIGGER;
2436 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2437 irq_set->start = 0;
2438 irq_set->count = 1;
2439 pfd = (int32_t *)&irq_set->data;
2441 *pfd = event_notifier_get_fd(&vdev->req_notifier);
2442 qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);
2444 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2445 error_report("vfio: Failed to set up device request notification");
2446 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2447 event_notifier_cleanup(&vdev->req_notifier);
2448 } else {
2449 vdev->req_enabled = true;
2452 g_free(irq_set);
2455 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2457 int argsz;
2458 struct vfio_irq_set *irq_set;
2459 int32_t *pfd;
2461 if (!vdev->req_enabled) {
2462 return;
2465 argsz = sizeof(*irq_set) + sizeof(*pfd);
2467 irq_set = g_malloc0(argsz);
2468 irq_set->argsz = argsz;
2469 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2470 VFIO_IRQ_SET_ACTION_TRIGGER;
2471 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2472 irq_set->start = 0;
2473 irq_set->count = 1;
2474 pfd = (int32_t *)&irq_set->data;
2475 *pfd = -1;
2477 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2478 error_report("vfio: Failed to de-assign device request fd: %m");
2480 g_free(irq_set);
2481 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2482 NULL, NULL, vdev);
2483 event_notifier_cleanup(&vdev->req_notifier);
2485 vdev->req_enabled = false;
2488 static int vfio_initfn(PCIDevice *pdev)
2490 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2491 VFIODevice *vbasedev_iter;
2492 VFIOGroup *group;
2493 char *tmp, group_path[PATH_MAX], *group_name;
2494 ssize_t len;
2495 struct stat st;
2496 int groupid;
2497 int i, ret;
2499 if (!vdev->vbasedev.sysfsdev) {
2500 vdev->vbasedev.sysfsdev =
2501 g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2502 vdev->host.domain, vdev->host.bus,
2503 vdev->host.slot, vdev->host.function);
2506 if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
2507 error_report("vfio: error: no such host device: %s",
2508 vdev->vbasedev.sysfsdev);
2509 return -errno;
2512 vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
2513 vdev->vbasedev.ops = &vfio_pci_ops;
2514 vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
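/*
 * The IOMMU group is discovered by resolving the device's iommu_group
 * symlink in sysfs; the basename of the link target is the group
 * number passed to vfio_get_group().
 */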
2516 tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2517 len = readlink(tmp, group_path, sizeof(group_path));
2518 g_free(tmp);
2520 if (len <= 0 || len >= sizeof(group_path)) {
2521 error_report("vfio: error no iommu_group for device");
2522 return len < 0 ? -errno : -ENAMETOOLONG;
2525 group_path[len] = 0;
2527 group_name = basename(group_path);
2528 if (sscanf(group_name, "%d", &groupid) != 1) {
2529 error_report("vfio: error reading %s: %m", group_path);
2530 return -errno;
2533 trace_vfio_initfn(vdev->vbasedev.name, groupid);
2535 group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
2536 if (!group) {
2537 error_report("vfio: failed to get group %d", groupid);
2538 return -ENOENT;
2541 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2542 if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
2543 error_report("vfio: error: device %s is already attached",
2544 vdev->vbasedev.name);
2545 vfio_put_group(group);
2546 return -EBUSY;
2550 ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev);
2551 if (ret) {
2552 error_report("vfio: failed to get device %s", vdev->vbasedev.name);
2553 vfio_put_group(group);
2554 return ret;
2557 ret = vfio_populate_device(vdev);
2558 if (ret) {
2559 return ret;
2562 /* Get a copy of config space */
2563 ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
2564 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2565 vdev->config_offset);
2566 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2567 ret = ret < 0 ? -errno : -EFAULT;
2568 error_report("vfio: Failed to read device config space");
2569 return ret;
2572 /* vfio emulates a lot for us, but some bits need extra love */
2573 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2575 /* QEMU can choose to expose the ROM or not */
2576 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
2579 * The PCI spec reserves vendor ID 0xffff as an invalid value. The
2580 * device ID is managed by the vendor and need only be a 16-bit value.
2581 * Allow any 16-bit value for subsystem so they can be hidden or changed.
2583 if (vdev->vendor_id != PCI_ANY_ID) {
2584 if (vdev->vendor_id >= 0xffff) {
2585 error_report("vfio: Invalid PCI vendor ID provided");
2586 return -EINVAL;
2588 vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2589 trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2590 } else {
2591 vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2594 if (vdev->device_id != PCI_ANY_ID) {
2595 if (vdev->device_id > 0xffff) {
2596 error_report("vfio: Invalid PCI device ID provided");
2597 return -EINVAL;
2599 vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2600 trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2601 } else {
2602 vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2605 if (vdev->sub_vendor_id != PCI_ANY_ID) {
2606 if (vdev->sub_vendor_id > 0xffff) {
2607 error_report("vfio: Invalid PCI subsystem vendor ID provided");
2608 return -EINVAL;
2610 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2611 vdev->sub_vendor_id, ~0);
2612 trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2613 vdev->sub_vendor_id);
2616 if (vdev->sub_device_id != PCI_ANY_ID) {
2617 if (vdev->sub_device_id > 0xffff) {
2618 error_report("vfio: Invalid PCI subsystem device ID provided");
2619 return -EINVAL;
2621 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2622 trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2623 vdev->sub_device_id);
2626 /* QEMU can change multi-function devices to single function, or the reverse */
2627 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2628 PCI_HEADER_TYPE_MULTI_FUNCTION;
2630 /* Restore or clear multifunction, this is always controlled by QEMU */
2631 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2632 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2633 } else {
2634 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2638 * Clear host resource mapping info. If we choose not to register a
2639 * BAR, such as might be the case with the option ROM, we can get
2640 * confusing, unwritable, residual addresses from the host here.
2642 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2643 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2645 vfio_pci_size_rom(vdev);
2647 ret = vfio_msix_early_setup(vdev);
2648 if (ret) {
2649 return ret;
2652 vfio_bars_setup(vdev);
2654 ret = vfio_add_capabilities(vdev);
2655 if (ret) {
2656 goto out_teardown;
2659 if (vdev->vga) {
2660 vfio_vga_quirk_setup(vdev);
2663 for (i = 0; i < PCI_ROM_SLOT; i++) {
2664 vfio_bar_quirk_setup(vdev, i);
2667 if (!vdev->igd_opregion &&
2668 vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
2669 struct vfio_region_info *opregion;
2671 if (vdev->pdev.qdev.hotplugged) {
2672 error_report("Cannot support IGD OpRegion feature on hotplugged "
2673 "device %s", vdev->vbasedev.name);
2674 ret = -EINVAL;
2675 goto out_teardown;
2678 ret = vfio_get_dev_region_info(&vdev->vbasedev,
2679 VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
2680 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
2681 if (ret) {
2682 error_report("Device %s does not support requested IGD OpRegion "
2683 "feature", vdev->vbasedev.name);
2684 goto out_teardown;
2687 ret = vfio_pci_igd_opregion_init(vdev, opregion);
2688 g_free(opregion);
2689 if (ret) {
2690 error_report("Device %s IGD OpRegion initialization failed",
2691 vdev->vbasedev.name);
2692 goto out_teardown;
2696 /* QEMU emulates all of MSI & MSIX */
2697 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
2698 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
2699 MSIX_CAP_LENGTH);
2702 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
2703 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
2704 vdev->msi_cap_size);
2707 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
2708 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
2709 vfio_intx_mmap_enable, vdev);
2710 pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
2711 ret = vfio_intx_enable(vdev);
2712 if (ret) {
2713 goto out_teardown;
2717 vfio_register_err_notifier(vdev);
2718 vfio_register_req_notifier(vdev);
2719 vfio_setup_resetfn_quirk(vdev);
2721 return 0;
2723 out_teardown:
2724 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2725 vfio_teardown_msi(vdev);
2726 vfio_bars_exit(vdev);
2727 return ret;
2730 static void vfio_instance_finalize(Object *obj)
2732 PCIDevice *pci_dev = PCI_DEVICE(obj);
2733 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
2734 VFIOGroup *group = vdev->vbasedev.group;
2736 vfio_bars_finalize(vdev);
2737 g_free(vdev->emulated_config_bits);
2738 g_free(vdev->rom);
2740 * XXX Leaking igd_opregion is not an oversight; we can't remove the
2741 * fw_cfg entry, so leaking this allocation seems like the safest
2742 * option.
2744 * g_free(vdev->igd_opregion);
2746 vfio_put_device(vdev);
2747 vfio_put_group(group);
2750 static void vfio_exitfn(PCIDevice *pdev)
2752 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2754 vfio_unregister_req_notifier(vdev);
2755 vfio_unregister_err_notifier(vdev);
2756 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2757 vfio_disable_interrupts(vdev);
2758 if (vdev->intx.mmap_timer) {
2759 timer_free(vdev->intx.mmap_timer);
2761 vfio_teardown_msi(vdev);
2762 vfio_bars_exit(vdev);
2765 static void vfio_pci_reset(DeviceState *dev)
2767 PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
2768 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2770 trace_vfio_pci_reset(vdev->vbasedev.name);
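/*
 * Reset mechanisms are attempted in order of preference: a
 * device-specific quirk, the kernel's device reset (preferred when FLR
 * is available), a bus hot reset, and finally a PM reset as the last
 * resort.
 */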
2772 vfio_pci_pre_reset(vdev);
2774 if (vdev->resetfn && !vdev->resetfn(vdev)) {
2775 goto post_reset;
2778 if (vdev->vbasedev.reset_works &&
2779 (vdev->has_flr || !vdev->has_pm_reset) &&
2780 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2781 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
2782 goto post_reset;
2785 /* See if we can do our own bus reset */
2786 if (!vfio_pci_hot_reset_one(vdev)) {
2787 goto post_reset;
2790 /* If nothing else works and the device supports PM reset, use it */
2791 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
2792 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2793 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
2794 goto post_reset;
2797 post_reset:
2798 vfio_pci_post_reset(vdev);
2801 static void vfio_instance_init(Object *obj)
2803 PCIDevice *pci_dev = PCI_DEVICE(obj);
2804 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));
2806 device_add_bootindex_property(obj, &vdev->bootindex,
2807 "bootindex", NULL,
2808 &pci_dev->qdev, NULL);
2811 static Property vfio_pci_dev_properties[] = {
2812 DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
2813 DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
2814 DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
2815 intx.mmap_timeout, 1100),
2816 DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
2817 VFIO_FEATURE_ENABLE_VGA_BIT, false),
2818 DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
2819 VFIO_FEATURE_ENABLE_REQ_BIT, true),
2820 DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
2821 VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
2822 DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
2823 DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
2824 DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
2825 DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
2826 DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
2827 DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
2828 DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
2829 sub_vendor_id, PCI_ANY_ID),
2830 DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
2831 sub_device_id, PCI_ANY_ID),
2832 DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
2834 * TODO - support passed fds... is this necessary?
2835 * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
2836 * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
2838 DEFINE_PROP_END_OF_LIST(),
2841 static const VMStateDescription vfio_pci_vmstate = {
2842 .name = "vfio-pci",
2843 .unmigratable = 1,
2846 static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
2848 DeviceClass *dc = DEVICE_CLASS(klass);
2849 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
2851 dc->reset = vfio_pci_reset;
2852 dc->props = vfio_pci_dev_properties;
2853 dc->vmsd = &vfio_pci_vmstate;
2854 dc->desc = "VFIO-based PCI device assignment";
2855 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2856 pdc->init = vfio_initfn;
2857 pdc->exit = vfio_exitfn;
2858 pdc->config_read = vfio_pci_read_config;
2859 pdc->config_write = vfio_pci_write_config;
2860 pdc->is_express = 1; /* We might be */
2863 static const TypeInfo vfio_pci_dev_info = {
2864 .name = "vfio-pci",
2865 .parent = TYPE_PCI_DEVICE,
2866 .instance_size = sizeof(VFIOPCIDevice),
2867 .class_init = vfio_pci_dev_class_init,
2868 .instance_init = vfio_instance_init,
2869 .instance_finalize = vfio_instance_finalize,
2872 static void register_vfio_pci_dev_type(void)
2874 type_register_static(&vfio_pci_dev_info);
2877 type_init(register_vfio_pci_dev_type)