/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "hw/vfio/pci.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "qapi/error.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"
#include "sysemu/tpm.h"
VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
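
/*
 * Helpers that turn VFIO_DEVICE_SET_IRQS action flags and PCI IRQ indexes
 * into readable strings; they are only used for error reporting below.
 */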
static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}
static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}
static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case
         * sections with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/*
 * Device state interfaces
 */

typedef struct {
    unsigned long *bitmap;
    hwaddr size;
    hwaddr pages;
} VFIOBitmap;

static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
{
    vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
    vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
                                         BITS_PER_BYTE;
    vbmap->bitmap = g_try_malloc0(vbmap->size);
    if (!vbmap->bitmap) {
        return -ENOMEM;
    }

    return 0;
}

static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                 uint64_t size, ram_addr_t ram_addr);
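
/*
 * Migration is considered active from VFIO's point of view only while no
 * attached device has a migration blocker registered.
 */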
bool vfio_mig_active(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_group_list)) {
        return false;
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration_blocker) {
                return false;
            }
        }
    }
    return true;
}
static Error *multiple_devices_migration_blocker;

/*
 * Multiple devices migration is allowed only if all devices support P2P
 * migration. Single device migration is allowed regardless of P2P migration
 * support.
 */
static bool vfio_multiple_devices_migration_is_supported(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    unsigned int device_num = 0;
    bool all_support_p2p = true;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration) {
                device_num++;

                if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
                    all_support_p2p = false;
                }
            }
        }
    }

    return all_support_p2p || device_num <= 1;
}
int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
{
    int ret;

    if (vfio_multiple_devices_migration_is_supported()) {
        return 0;
    }

    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
        error_setg(errp, "Multiple VFIO devices migration is supported only if "
                         "all of them support P2P migration");
        return -EINVAL;
    }

    if (multiple_devices_migration_blocker) {
        return 0;
    }

    error_setg(&multiple_devices_migration_blocker,
               "Multiple VFIO devices migration is supported only if all of "
               "them support P2P migration");
    ret = migrate_add_blocker(multiple_devices_migration_blocker, errp);
    if (ret < 0) {
        error_free(multiple_devices_migration_blocker);
        multiple_devices_migration_blocker = NULL;
    }

    return ret;
}

void vfio_unblock_multiple_devices_migration(void)
{
    if (!multiple_devices_migration_blocker ||
        !vfio_multiple_devices_migration_is_supported()) {
        return;
    }

    migrate_del_blocker(multiple_devices_migration_blocker);
    error_free(multiple_devices_migration_blocker);
    multiple_devices_migration_blocker = NULL;
}
bool vfio_viommu_preset(VFIODevice *vbasedev)
{
    return vbasedev->group->container->space->as != &address_space_memory;
}
static void vfio_set_migration_error(int err)
{
    MigrationState *ms = migrate_get_current();

    if (migration_is_setup_or_active(ms->state)) {
        WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
            if (ms->to_dst_file) {
                qemu_file_set_error(ms->to_dst_file, err);
            }
        }
    }
}
bool vfio_device_state_is_running(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
           migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P;
}

bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ||
           migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
}
static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (ms->state != MIGRATION_STATUS_ACTIVE &&
        ms->state != MIGRATION_STATUS_DEVICE) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
                (vfio_device_state_is_running(vbasedev) ||
                 vfio_device_state_is_precopy(vbasedev))) {
                return false;
            }
        }
    }
    return true;
}
static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (!vbasedev->dirty_pages_supported) {
                return false;
            }
        }
    }

    return true;
}
/*
 * Check if all VFIO devices are running and migration is active, which is
 * essentially equivalent to the migration being in pre-copy phase.
 */
static bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    if (!migration_is_active(migrate_get_current())) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if (vfio_device_state_is_running(vbasedev) ||
                vfio_device_state_is_precopy(vbasedev)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}
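
/*
 * Unmap an IOVA range and fetch the dirty bitmap for it in the same
 * VFIO_IOMMU_UNMAP_DMA call, so pages dirtied right up to the unmap are
 * still reported to the migration dirty log.
 */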
static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > container->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                iotlb->translated_addr, vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size,
                          IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;

    if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
        if (!vfio_devices_all_device_dirty_tracking(container) &&
            container->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_get_dirty_bitmap(container, iova, size,
                                    iotlb->translated_addr);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}
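
/*
 * Host DMA windows describe the IOVA ranges the host IOMMU can actually
 * map; guest sections are checked against this list before being mapped.
 */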
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}
static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            g_free(hostwin);
            return 0;
        }
    }

    return -1;
}
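
/*
 * Helpers for the VFIO MemoryListener: decide which address-space sections
 * should be ignored rather than turned into DMA mappings.
 */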
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           memory_region_is_protected(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
/* Called with rcu_read_lock held.  */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    bool ret, mr_has_discard_manager;

    ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
                               &mr_has_discard_manager);
    if (ret && mr_has_discard_manager) {
        /*
         * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
         * pages will remain pinned inside vfio until unmapped, resulting in a
         * higher memory consumption than expected. If memory would get
         * populated again later, there would be an inconsistency between pages
         * pinned by vfio and pages seen by QEMU. This is the case until
         * unmapped from the IOMMU (e.g., during device reset).
         *
         * With malicious guests, we really only care about pinning more memory
         * than expected. RLIMIT_MEMLOCK set for the user/process can never be
         * exceeded and can be used to mitigate this problem.
         */
        warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
                         " RAM (e.g., virtio-mem) works, however, malicious"
                         " guests can trigger pinning of more memory than"
                         " intended via an IOMMU. It's possible to mitigate "
                         " by setting/adjusting RLIMIT_MEMLOCK.");
    }

    return ret;
}
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        vfio_set_migration_error(-EINVAL);
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%s)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%s)",
                         container, iova,
                         iotlb->addr_mask + 1, ret, strerror(-ret));
            vfio_set_migration_error(ret);
        }
    }
out:
    rcu_read_unlock();
}
static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    int ret;

    /* Unmap with a single call. */
    ret = vfio_dma_unmap(vrdl->container, iova, size, NULL);
    if (ret) {
        error_report("%s: vfio_dma_unmap() failed: %s", __func__,
                     strerror(-ret));
    }
}
static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    const hwaddr end = section->offset_within_region +
                       int128_get64(section->size);
    hwaddr start, next, iova;
    void *vaddr;
    int ret;

    /*
     * Map in (aligned within memory region) minimum granularity, so we can
     * unmap in minimum granularity later.
     */
    for (start = section->offset_within_region; start < end; start = next) {
        next = ROUND_UP(start + 1, vrdl->granularity);
        next = MIN(next, end);

        iova = start - section->offset_within_region +
               section->offset_within_address_space;
        vaddr = memory_region_get_ram_ptr(section->mr) + start;

        ret = vfio_dma_map(vrdl->container, iova, next - start,
                           vaddr, section->readonly);
        if (ret) {
            /* Rollback */
            vfio_ram_discard_notify_discard(rdl, section);
            return ret;
        }
    }
    return 0;
}
878 static void vfio_register_ram_discard_listener(VFIOContainer
*container
,
879 MemoryRegionSection
*section
)
881 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(section
->mr
);
882 VFIORamDiscardListener
*vrdl
;
884 /* Ignore some corner cases not relevant in practice. */
885 g_assert(QEMU_IS_ALIGNED(section
->offset_within_region
, TARGET_PAGE_SIZE
));
886 g_assert(QEMU_IS_ALIGNED(section
->offset_within_address_space
,
888 g_assert(QEMU_IS_ALIGNED(int128_get64(section
->size
), TARGET_PAGE_SIZE
));
890 vrdl
= g_new0(VFIORamDiscardListener
, 1);
891 vrdl
->container
= container
;
892 vrdl
->mr
= section
->mr
;
893 vrdl
->offset_within_address_space
= section
->offset_within_address_space
;
894 vrdl
->size
= int128_get64(section
->size
);
895 vrdl
->granularity
= ram_discard_manager_get_min_granularity(rdm
,
898 g_assert(vrdl
->granularity
&& is_power_of_2(vrdl
->granularity
));
899 g_assert(container
->pgsizes
&&
900 vrdl
->granularity
>= 1ULL << ctz64(container
->pgsizes
));
902 ram_discard_listener_init(&vrdl
->listener
,
903 vfio_ram_discard_notify_populate
,
904 vfio_ram_discard_notify_discard
, true);
905 ram_discard_manager_register_listener(rdm
, &vrdl
->listener
, section
);
906 QLIST_INSERT_HEAD(&container
->vrdl_list
, vrdl
, next
);
909 * Sanity-check if we have a theoretically problematic setup where we could
910 * exceed the maximum number of possible DMA mappings over time. We assume
911 * that each mapped section in the same address space as a RamDiscardManager
912 * section consumes exactly one DMA mapping, with the exception of
913 * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
914 * in the same address space as RamDiscardManager sections.
916 * We assume that each section in the address space consumes one memslot.
917 * We take the number of KVM memory slots as a best guess for the maximum
918 * number of sections in the address space we could have over time,
919 * also consuming DMA mappings.
921 if (container
->dma_max_mappings
) {
922 unsigned int vrdl_count
= 0, vrdl_mappings
= 0, max_memslots
= 512;
926 max_memslots
= kvm_get_max_memslots();
930 QLIST_FOREACH(vrdl
, &container
->vrdl_list
, next
) {
933 start
= QEMU_ALIGN_DOWN(vrdl
->offset_within_address_space
,
935 end
= ROUND_UP(vrdl
->offset_within_address_space
+ vrdl
->size
,
937 vrdl_mappings
+= (end
- start
) / vrdl
->granularity
;
941 if (vrdl_mappings
+ max_memslots
- vrdl_count
>
942 container
->dma_max_mappings
) {
943 warn_report("%s: possibly running out of DMA mappings. E.g., try"
944 " increasing the 'block-size' of virtio-mem devies."
945 " Maximum possible DMA mappings: %d, Maximum possible"
946 " memslots: %d", __func__
, container
->dma_max_mappings
,
952 static void vfio_unregister_ram_discard_listener(VFIOContainer
*container
,
953 MemoryRegionSection
*section
)
955 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(section
->mr
);
956 VFIORamDiscardListener
*vrdl
= NULL
;
958 QLIST_FOREACH(vrdl
, &container
->vrdl_list
, next
) {
959 if (vrdl
->mr
== section
->mr
&&
960 vrdl
->offset_within_address_space
==
961 section
->offset_within_address_space
) {
967 hw_error("vfio: Trying to unregister missing RAM discard listener");
970 ram_discard_manager_unregister_listener(rdm
, &vrdl
->listener
);
971 QLIST_REMOVE(vrdl
, next
);
975 static VFIOHostDMAWindow
*vfio_find_hostwin(VFIOContainer
*container
,
976 hwaddr iova
, hwaddr end
)
978 VFIOHostDMAWindow
*hostwin
;
979 bool hostwin_found
= false;
981 QLIST_FOREACH(hostwin
, &container
->hostwin_list
, hostwin_next
) {
982 if (hostwin
->min_iova
<= iova
&& end
<= hostwin
->max_iova
) {
983 hostwin_found
= true;
988 return hostwin_found
? hostwin
: NULL
;
991 static bool vfio_known_safe_misalignment(MemoryRegionSection
*section
)
993 MemoryRegion
*mr
= section
->mr
;
995 if (!TPM_IS_CRB(mr
->owner
)) {
999 /* this is a known safe misaligned region, just trace for debug purpose */
1000 trace_vfio_known_safe_misalignment(memory_region_name(mr
),
1001 section
->offset_within_address_space
,
1002 section
->offset_within_region
,
1003 qemu_real_host_page_size());
1007 static bool vfio_listener_valid_section(MemoryRegionSection
*section
,
1010 if (vfio_listener_skipped_section(section
)) {
1011 trace_vfio_listener_region_skip(name
,
1012 section
->offset_within_address_space
,
1013 section
->offset_within_address_space
+
1014 int128_get64(int128_sub(section
->size
, int128_one())));
1018 if (unlikely((section
->offset_within_address_space
&
1019 ~qemu_real_host_page_mask()) !=
1020 (section
->offset_within_region
& ~qemu_real_host_page_mask()))) {
1021 if (!vfio_known_safe_misalignment(section
)) {
1022 error_report("%s received unaligned region %s iova=0x%"PRIx64
1023 " offset_within_region=0x%"PRIx64
1024 " qemu_real_host_page_size=0x%"PRIxPTR
,
1025 __func__
, memory_region_name(section
->mr
),
1026 section
->offset_within_address_space
,
1027 section
->offset_within_region
,
1028 qemu_real_host_page_size());
1036 static bool vfio_get_section_iova_range(VFIOContainer
*container
,
1037 MemoryRegionSection
*section
,
1038 hwaddr
*out_iova
, hwaddr
*out_end
,
1044 iova
= REAL_HOST_PAGE_ALIGN(section
->offset_within_address_space
);
1045 llend
= int128_make64(section
->offset_within_address_space
);
1046 llend
= int128_add(llend
, section
->size
);
1047 llend
= int128_and(llend
, int128_exts64(qemu_real_host_page_mask()));
1049 if (int128_ge(int128_make64(iova
), llend
)) {
1054 *out_end
= int128_get64(int128_sub(llend
, int128_one()));
1061 static void vfio_listener_region_add(MemoryListener
*listener
,
1062 MemoryRegionSection
*section
)
1064 VFIOContainer
*container
= container_of(listener
, VFIOContainer
, listener
);
1066 Int128 llend
, llsize
;
1069 VFIOHostDMAWindow
*hostwin
;
1072 if (!vfio_listener_valid_section(section
, "region_add")) {
1076 if (!vfio_get_section_iova_range(container
, section
, &iova
, &end
, &llend
)) {
1077 if (memory_region_is_ram_device(section
->mr
)) {
1078 trace_vfio_listener_region_add_no_dma_map(
1079 memory_region_name(section
->mr
),
1080 section
->offset_within_address_space
,
1081 int128_getlo(section
->size
),
1082 qemu_real_host_page_size());
1087 if (container
->iommu_type
== VFIO_SPAPR_TCE_v2_IOMMU
) {
1090 /* For now intersections are not allowed, we may relax this later */
1091 QLIST_FOREACH(hostwin
, &container
->hostwin_list
, hostwin_next
) {
1092 if (ranges_overlap(hostwin
->min_iova
,
1093 hostwin
->max_iova
- hostwin
->min_iova
+ 1,
1094 section
->offset_within_address_space
,
1095 int128_get64(section
->size
))) {
1097 "region [0x%"PRIx64
",0x%"PRIx64
"] overlaps with existing"
1098 "host DMA window [0x%"PRIx64
",0x%"PRIx64
"]",
1099 section
->offset_within_address_space
,
1100 section
->offset_within_address_space
+
1101 int128_get64(section
->size
) - 1,
1102 hostwin
->min_iova
, hostwin
->max_iova
);
1107 ret
= vfio_spapr_create_window(container
, section
, &pgsize
);
1109 error_setg_errno(&err
, -ret
, "Failed to create SPAPR window");
1113 vfio_host_win_add(container
, section
->offset_within_address_space
,
1114 section
->offset_within_address_space
+
1115 int128_get64(section
->size
) - 1, pgsize
);
1117 if (kvm_enabled()) {
1119 IOMMUMemoryRegion
*iommu_mr
= IOMMU_MEMORY_REGION(section
->mr
);
1120 struct kvm_vfio_spapr_tce param
;
1121 struct kvm_device_attr attr
= {
1122 .group
= KVM_DEV_VFIO_GROUP
,
1123 .attr
= KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE
,
1124 .addr
= (uint64_t)(unsigned long)¶m
,
1127 if (!memory_region_iommu_get_attr(iommu_mr
, IOMMU_ATTR_SPAPR_TCE_FD
,
1129 QLIST_FOREACH(group
, &container
->group_list
, container_next
) {
1130 param
.groupfd
= group
->fd
;
1131 if (ioctl(vfio_kvm_device_fd
, KVM_SET_DEVICE_ATTR
, &attr
)) {
1132 error_report("vfio: failed to setup fd %d "
1133 "for a group with fd %d: %s",
1134 param
.tablefd
, param
.groupfd
,
1138 trace_vfio_spapr_group_attach(param
.groupfd
, param
.tablefd
);
1145 hostwin
= vfio_find_hostwin(container
, iova
, end
);
1147 error_setg(&err
, "Container %p can't map guest IOVA region"
1148 " 0x%"HWADDR_PRIx
"..0x%"HWADDR_PRIx
, container
, iova
, end
);
1152 memory_region_ref(section
->mr
);
1154 if (memory_region_is_iommu(section
->mr
)) {
1155 VFIOGuestIOMMU
*giommu
;
1156 IOMMUMemoryRegion
*iommu_mr
= IOMMU_MEMORY_REGION(section
->mr
);
1159 trace_vfio_listener_region_add_iommu(iova
, end
);
1161 * FIXME: For VFIO iommu types which have KVM acceleration to
1162 * avoid bouncing all map/unmaps through qemu this way, this
1163 * would be the right place to wire that up (tell the KVM
1164 * device emulation the VFIO iommu handles to use).
1166 giommu
= g_malloc0(sizeof(*giommu
));
1167 giommu
->iommu_mr
= iommu_mr
;
1168 giommu
->iommu_offset
= section
->offset_within_address_space
-
1169 section
->offset_within_region
;
1170 giommu
->container
= container
;
1171 llend
= int128_add(int128_make64(section
->offset_within_region
),
1173 llend
= int128_sub(llend
, int128_one());
1174 iommu_idx
= memory_region_iommu_attrs_to_index(iommu_mr
,
1175 MEMTXATTRS_UNSPECIFIED
);
1176 iommu_notifier_init(&giommu
->n
, vfio_iommu_map_notify
,
1177 IOMMU_NOTIFIER_IOTLB_EVENTS
,
1178 section
->offset_within_region
,
1179 int128_get64(llend
),
1182 ret
= memory_region_iommu_set_page_size_mask(giommu
->iommu_mr
,
1190 ret
= memory_region_register_iommu_notifier(section
->mr
, &giommu
->n
,
1196 QLIST_INSERT_HEAD(&container
->giommu_list
, giommu
, giommu_next
);
1197 memory_region_iommu_replay(giommu
->iommu_mr
, &giommu
->n
);
1202 /* Here we assume that memory_region_is_ram(section->mr)==true */
1205 * For RAM memory regions with a RamDiscardManager, we only want to map the
1206 * actually populated parts - and update the mapping whenever we're notified
1209 if (memory_region_has_ram_discard_manager(section
->mr
)) {
1210 vfio_register_ram_discard_listener(container
, section
);
1214 vaddr
= memory_region_get_ram_ptr(section
->mr
) +
1215 section
->offset_within_region
+
1216 (iova
- section
->offset_within_address_space
);
1218 trace_vfio_listener_region_add_ram(iova
, end
, vaddr
);
1220 llsize
= int128_sub(llend
, int128_make64(iova
));
1222 if (memory_region_is_ram_device(section
->mr
)) {
1223 hwaddr pgmask
= (1ULL << ctz64(hostwin
->iova_pgsizes
)) - 1;
1225 if ((iova
& pgmask
) || (int128_get64(llsize
) & pgmask
)) {
1226 trace_vfio_listener_region_add_no_dma_map(
1227 memory_region_name(section
->mr
),
1228 section
->offset_within_address_space
,
1229 int128_getlo(section
->size
),
1235 ret
= vfio_dma_map(container
, iova
, int128_get64(llsize
),
1236 vaddr
, section
->readonly
);
1238 error_setg(&err
, "vfio_dma_map(%p, 0x%"HWADDR_PRIx
", "
1239 "0x%"HWADDR_PRIx
", %p) = %d (%s)",
1240 container
, iova
, int128_get64(llsize
), vaddr
, ret
,
1242 if (memory_region_is_ram_device(section
->mr
)) {
1243 /* Allow unexpected mappings not to be fatal for RAM devices */
1244 error_report_err(err
);
1253 if (memory_region_is_ram_device(section
->mr
)) {
1254 error_report("failed to vfio_dma_map. pci p2p may not work");
1258 * On the initfn path, store the first error in the container so we
1259 * can gracefully fail. Runtime, there's not much we can do other
1260 * than throw a hardware error.
1262 if (!container
->initialized
) {
1263 if (!container
->error
) {
1264 error_propagate_prepend(&container
->error
, err
,
1266 memory_region_name(section
->mr
));
1271 error_report_err(err
);
1272 hw_error("vfio: DMA mapping failed, unable to continue");
1276 static void vfio_listener_region_del(MemoryListener
*listener
,
1277 MemoryRegionSection
*section
)
1279 VFIOContainer
*container
= container_of(listener
, VFIOContainer
, listener
);
1281 Int128 llend
, llsize
;
1283 bool try_unmap
= true;
1285 if (!vfio_listener_valid_section(section
, "region_del")) {
1289 if (memory_region_is_iommu(section
->mr
)) {
1290 VFIOGuestIOMMU
*giommu
;
1292 QLIST_FOREACH(giommu
, &container
->giommu_list
, giommu_next
) {
1293 if (MEMORY_REGION(giommu
->iommu_mr
) == section
->mr
&&
1294 giommu
->n
.start
== section
->offset_within_region
) {
1295 memory_region_unregister_iommu_notifier(section
->mr
,
1297 QLIST_REMOVE(giommu
, giommu_next
);
1304 * FIXME: We assume the one big unmap below is adequate to
1305 * remove any individual page mappings in the IOMMU which
1306 * might have been copied into VFIO. This works for a page table
1307 * based IOMMU where a big unmap flattens a large range of IO-PTEs.
1308 * That may not be true for all IOMMU types.
1312 if (!vfio_get_section_iova_range(container
, section
, &iova
, &end
, &llend
)) {
1316 llsize
= int128_sub(llend
, int128_make64(iova
));
1318 trace_vfio_listener_region_del(iova
, end
);
1320 if (memory_region_is_ram_device(section
->mr
)) {
1322 VFIOHostDMAWindow
*hostwin
;
1324 hostwin
= vfio_find_hostwin(container
, iova
, end
);
1325 assert(hostwin
); /* or region_add() would have failed */
1327 pgmask
= (1ULL << ctz64(hostwin
->iova_pgsizes
)) - 1;
1328 try_unmap
= !((iova
& pgmask
) || (int128_get64(llsize
) & pgmask
));
1329 } else if (memory_region_has_ram_discard_manager(section
->mr
)) {
1330 vfio_unregister_ram_discard_listener(container
, section
);
1331 /* Unregistering will trigger an unmap. */
1336 if (int128_eq(llsize
, int128_2_64())) {
1337 /* The unmap ioctl doesn't accept a full 64-bit span. */
1338 llsize
= int128_rshift(llsize
, 1);
1339 ret
= vfio_dma_unmap(container
, iova
, int128_get64(llsize
), NULL
);
1341 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx
", "
1342 "0x%"HWADDR_PRIx
") = %d (%s)",
1343 container
, iova
, int128_get64(llsize
), ret
,
1346 iova
+= int128_get64(llsize
);
1348 ret
= vfio_dma_unmap(container
, iova
, int128_get64(llsize
), NULL
);
1350 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx
", "
1351 "0x%"HWADDR_PRIx
") = %d (%s)",
1352 container
, iova
, int128_get64(llsize
), ret
,
1357 memory_region_unref(section
->mr
);
1359 if (container
->iommu_type
== VFIO_SPAPR_TCE_v2_IOMMU
) {
1360 vfio_spapr_remove_window(container
,
1361 section
->offset_within_address_space
);
1362 if (vfio_host_win_del(container
,
1363 section
->offset_within_address_space
,
1364 section
->offset_within_address_space
+
1365 int128_get64(section
->size
) - 1) < 0) {
1366 hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx
,
1367 __func__
, section
->offset_within_address_space
);
1372 static int vfio_set_dirty_page_tracking(VFIOContainer
*container
, bool start
)
1375 struct vfio_iommu_type1_dirty_bitmap dirty
= {
1376 .argsz
= sizeof(dirty
),
1379 if (!container
->dirty_pages_supported
) {
1384 dirty
.flags
= VFIO_IOMMU_DIRTY_PAGES_FLAG_START
;
1386 dirty
.flags
= VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP
;
1389 ret
= ioctl(container
->fd
, VFIO_IOMMU_DIRTY_PAGES
, &dirty
);
1392 error_report("Failed to set dirty tracking flag 0x%x errno: %d",
1393 dirty
.flags
, errno
);
1399 typedef struct VFIODirtyRanges
{
1408 typedef struct VFIODirtyRangesListener
{
1409 VFIOContainer
*container
;
1410 VFIODirtyRanges ranges
;
1411 MemoryListener listener
;
1412 } VFIODirtyRangesListener
;
1414 static bool vfio_section_is_vfio_pci(MemoryRegionSection
*section
,
1415 VFIOContainer
*container
)
1417 VFIOPCIDevice
*pcidev
;
1418 VFIODevice
*vbasedev
;
1422 owner
= memory_region_owner(section
->mr
);
1424 QLIST_FOREACH(group
, &container
->group_list
, container_next
) {
1425 QLIST_FOREACH(vbasedev
, &group
->device_list
, next
) {
1426 if (vbasedev
->type
!= VFIO_DEVICE_TYPE_PCI
) {
1429 pcidev
= container_of(vbasedev
, VFIOPCIDevice
, vbasedev
);
1430 if (OBJECT(pcidev
) == owner
) {
1439 static void vfio_dirty_tracking_update(MemoryListener
*listener
,
1440 MemoryRegionSection
*section
)
1442 VFIODirtyRangesListener
*dirty
= container_of(listener
,
1443 VFIODirtyRangesListener
,
1445 VFIODirtyRanges
*range
= &dirty
->ranges
;
1446 hwaddr iova
, end
, *min
, *max
;
1448 if (!vfio_listener_valid_section(section
, "tracking_update") ||
1449 !vfio_get_section_iova_range(dirty
->container
, section
,
1450 &iova
, &end
, NULL
)) {
1455 * The address space passed to the dirty tracker is reduced to three ranges:
1456 * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the
1459 * The underlying reports of dirty will query a sub-interval of each of
1462 * The purpose of the three range handling is to handle known cases of big
1463 * holes in the address space, like the x86 AMD 1T hole, and firmware (like
1464 * OVMF) which may relocate the pci-hole64 to the end of the address space.
1465 * The latter would otherwise generate large ranges for tracking, stressing
1466 * the limits of supported hardware. The pci-hole32 will always be below 4G
1467 * (overlapping or not) so it doesn't need special handling and is part of
1470 * The alternative would be an IOVATree but that has a much bigger runtime
1471 * overhead and unnecessary complexity.
1473 if (vfio_section_is_vfio_pci(section
, dirty
->container
) &&
1474 iova
>= UINT32_MAX
) {
1475 min
= &range
->minpci64
;
1476 max
= &range
->maxpci64
;
1478 min
= (end
<= UINT32_MAX
) ? &range
->min32
: &range
->min64
;
1479 max
= (end
<= UINT32_MAX
) ? &range
->max32
: &range
->max64
;
1488 trace_vfio_device_dirty_tracking_update(iova
, end
, *min
, *max
);
1492 static const MemoryListener vfio_dirty_tracking_listener
= {
1493 .name
= "vfio-tracking",
1494 .region_add
= vfio_dirty_tracking_update
,
1497 static void vfio_dirty_tracking_init(VFIOContainer
*container
,
1498 VFIODirtyRanges
*ranges
)
1500 VFIODirtyRangesListener dirty
;
1502 memset(&dirty
, 0, sizeof(dirty
));
1503 dirty
.ranges
.min32
= UINT32_MAX
;
1504 dirty
.ranges
.min64
= UINT64_MAX
;
1505 dirty
.ranges
.minpci64
= UINT64_MAX
;
1506 dirty
.listener
= vfio_dirty_tracking_listener
;
1507 dirty
.container
= container
;
1509 memory_listener_register(&dirty
.listener
,
1510 container
->space
->as
);
1512 *ranges
= dirty
.ranges
;
1515 * The memory listener is synchronous, and used to calculate the range
1516 * to dirty tracking. Unregister it after we are done as we are not
1517 * interested in any follow-up updates.
1519 memory_listener_unregister(&dirty
.listener
);
1522 static void vfio_devices_dma_logging_stop(VFIOContainer
*container
)
1524 uint64_t buf
[DIV_ROUND_UP(sizeof(struct vfio_device_feature
),
1525 sizeof(uint64_t))] = {};
1526 struct vfio_device_feature
*feature
= (struct vfio_device_feature
*)buf
;
1527 VFIODevice
*vbasedev
;
1530 feature
->argsz
= sizeof(buf
);
1531 feature
->flags
= VFIO_DEVICE_FEATURE_SET
|
1532 VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP
;
1534 QLIST_FOREACH(group
, &container
->group_list
, container_next
) {
1535 QLIST_FOREACH(vbasedev
, &group
->device_list
, next
) {
1536 if (!vbasedev
->dirty_tracking
) {
1540 if (ioctl(vbasedev
->fd
, VFIO_DEVICE_FEATURE
, feature
)) {
1541 warn_report("%s: Failed to stop DMA logging, err %d (%s)",
1542 vbasedev
->name
, -errno
, strerror(errno
));
1544 vbasedev
->dirty_tracking
= false;
1549 static struct vfio_device_feature
*
1550 vfio_device_feature_dma_logging_start_create(VFIOContainer
*container
,
1551 VFIODirtyRanges
*tracking
)
1553 struct vfio_device_feature
*feature
;
1554 size_t feature_size
;
1555 struct vfio_device_feature_dma_logging_control
*control
;
1556 struct vfio_device_feature_dma_logging_range
*ranges
;
1558 feature_size
= sizeof(struct vfio_device_feature
) +
1559 sizeof(struct vfio_device_feature_dma_logging_control
);
1560 feature
= g_try_malloc0(feature_size
);
1565 feature
->argsz
= feature_size
;
1566 feature
->flags
= VFIO_DEVICE_FEATURE_SET
|
1567 VFIO_DEVICE_FEATURE_DMA_LOGGING_START
;
1569 control
= (struct vfio_device_feature_dma_logging_control
*)feature
->data
;
1570 control
->page_size
= qemu_real_host_page_size();
1573 * DMA logging uAPI guarantees to support at least a number of ranges that
1574 * fits into a single host kernel base page.
1576 control
->num_ranges
= !!tracking
->max32
+ !!tracking
->max64
+
1577 !!tracking
->maxpci64
;
1578 ranges
= g_try_new0(struct vfio_device_feature_dma_logging_range
,
1579 control
->num_ranges
);
1587 control
->ranges
= (__u64
)(uintptr_t)ranges
;
1588 if (tracking
->max32
) {
1589 ranges
->iova
= tracking
->min32
;
1590 ranges
->length
= (tracking
->max32
- tracking
->min32
) + 1;
1593 if (tracking
->max64
) {
1594 ranges
->iova
= tracking
->min64
;
1595 ranges
->length
= (tracking
->max64
- tracking
->min64
) + 1;
1598 if (tracking
->maxpci64
) {
1599 ranges
->iova
= tracking
->minpci64
;
1600 ranges
->length
= (tracking
->maxpci64
- tracking
->minpci64
) + 1;
1603 trace_vfio_device_dirty_tracking_start(control
->num_ranges
,
1604 tracking
->min32
, tracking
->max32
,
1605 tracking
->min64
, tracking
->max64
,
1606 tracking
->minpci64
, tracking
->maxpci64
);
1611 static void vfio_device_feature_dma_logging_start_destroy(
1612 struct vfio_device_feature
*feature
)
1614 struct vfio_device_feature_dma_logging_control
*control
=
1615 (struct vfio_device_feature_dma_logging_control
*)feature
->data
;
1616 struct vfio_device_feature_dma_logging_range
*ranges
=
1617 (struct vfio_device_feature_dma_logging_range
*)(uintptr_t)control
->ranges
;
1623 static int vfio_devices_dma_logging_start(VFIOContainer
*container
)
1625 struct vfio_device_feature
*feature
;
1626 VFIODirtyRanges ranges
;
1627 VFIODevice
*vbasedev
;
1631 vfio_dirty_tracking_init(container
, &ranges
);
1632 feature
= vfio_device_feature_dma_logging_start_create(container
,
1638 QLIST_FOREACH(group
, &container
->group_list
, container_next
) {
1639 QLIST_FOREACH(vbasedev
, &group
->device_list
, next
) {
1640 if (vbasedev
->dirty_tracking
) {
1644 ret
= ioctl(vbasedev
->fd
, VFIO_DEVICE_FEATURE
, feature
);
1647 error_report("%s: Failed to start DMA logging, err %d (%s)",
1648 vbasedev
->name
, ret
, strerror(errno
));
1651 vbasedev
->dirty_tracking
= true;
1657 vfio_devices_dma_logging_stop(container
);
1660 vfio_device_feature_dma_logging_start_destroy(feature
);
1665 static void vfio_listener_log_global_start(MemoryListener
*listener
)
1667 VFIOContainer
*container
= container_of(listener
, VFIOContainer
, listener
);
1670 if (vfio_devices_all_device_dirty_tracking(container
)) {
1671 ret
= vfio_devices_dma_logging_start(container
);
1673 ret
= vfio_set_dirty_page_tracking(container
, true);
1677 error_report("vfio: Could not start dirty page tracking, err: %d (%s)",
1678 ret
, strerror(-ret
));
1679 vfio_set_migration_error(ret
);
1683 static void vfio_listener_log_global_stop(MemoryListener
*listener
)
1685 VFIOContainer
*container
= container_of(listener
, VFIOContainer
, listener
);
1688 if (vfio_devices_all_device_dirty_tracking(container
)) {
1689 vfio_devices_dma_logging_stop(container
);
1691 ret
= vfio_set_dirty_page_tracking(container
, false);
1695 error_report("vfio: Could not stop dirty page tracking, err: %d (%s)",
1696 ret
, strerror(-ret
));
1697 vfio_set_migration_error(ret
);
1701 static int vfio_device_dma_logging_report(VFIODevice
*vbasedev
, hwaddr iova
,
1702 hwaddr size
, void *bitmap
)
1704 uint64_t buf
[DIV_ROUND_UP(sizeof(struct vfio_device_feature
) +
1705 sizeof(struct vfio_device_feature_dma_logging_report
),
1706 sizeof(__u64
))] = {};
1707 struct vfio_device_feature
*feature
= (struct vfio_device_feature
*)buf
;
1708 struct vfio_device_feature_dma_logging_report
*report
=
1709 (struct vfio_device_feature_dma_logging_report
*)feature
->data
;
1711 report
->iova
= iova
;
1712 report
->length
= size
;
1713 report
->page_size
= qemu_real_host_page_size();
1714 report
->bitmap
= (__u64
)(uintptr_t)bitmap
;
1716 feature
->argsz
= sizeof(buf
);
1717 feature
->flags
= VFIO_DEVICE_FEATURE_GET
|
1718 VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT
;
1720 if (ioctl(vbasedev
->fd
, VFIO_DEVICE_FEATURE
, feature
)) {
1727 static int vfio_devices_query_dirty_bitmap(VFIOContainer
*container
,
1728 VFIOBitmap
*vbmap
, hwaddr iova
,
1731 VFIODevice
*vbasedev
;
1735 QLIST_FOREACH(group
, &container
->group_list
, container_next
) {
1736 QLIST_FOREACH(vbasedev
, &group
->device_list
, next
) {
1737 ret
= vfio_device_dma_logging_report(vbasedev
, iova
, size
,
1740 error_report("%s: Failed to get DMA logging report, iova: "
1741 "0x%" HWADDR_PRIx
", size: 0x%" HWADDR_PRIx
1743 vbasedev
->name
, iova
, size
, ret
, strerror(-ret
));
1753 static int vfio_query_dirty_bitmap(VFIOContainer
*container
, VFIOBitmap
*vbmap
,
1754 hwaddr iova
, hwaddr size
)
1756 struct vfio_iommu_type1_dirty_bitmap
*dbitmap
;
1757 struct vfio_iommu_type1_dirty_bitmap_get
*range
;
1760 dbitmap
= g_malloc0(sizeof(*dbitmap
) + sizeof(*range
));
1762 dbitmap
->argsz
= sizeof(*dbitmap
) + sizeof(*range
);
1763 dbitmap
->flags
= VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP
;
1764 range
= (struct vfio_iommu_type1_dirty_bitmap_get
*)&dbitmap
->data
;
1769 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
1770 * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
1771 * to qemu_real_host_page_size.
1773 range
->bitmap
.pgsize
= qemu_real_host_page_size();
1774 range
->bitmap
.size
= vbmap
->size
;
1775 range
->bitmap
.data
= (__u64
*)vbmap
->bitmap
;
1777 ret
= ioctl(container
->fd
, VFIO_IOMMU_DIRTY_PAGES
, dbitmap
);
1780 error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
1781 " size: 0x%"PRIx64
" err: %d", (uint64_t)range
->iova
,
1782 (uint64_t)range
->size
, errno
);
1790 static int vfio_get_dirty_bitmap(VFIOContainer
*container
, uint64_t iova
,
1791 uint64_t size
, ram_addr_t ram_addr
)
1793 bool all_device_dirty_tracking
=
1794 vfio_devices_all_device_dirty_tracking(container
);
1795 uint64_t dirty_pages
;
1799 if (!container
->dirty_pages_supported
&& !all_device_dirty_tracking
) {
1800 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1801 tcg_enabled() ? DIRTY_CLIENTS_ALL
:
1802 DIRTY_CLIENTS_NOCODE
);
1806 ret
= vfio_bitmap_alloc(&vbmap
, size
);
1811 if (all_device_dirty_tracking
) {
1812 ret
= vfio_devices_query_dirty_bitmap(container
, &vbmap
, iova
, size
);
1814 ret
= vfio_query_dirty_bitmap(container
, &vbmap
, iova
, size
);
1821 dirty_pages
= cpu_physical_memory_set_dirty_lebitmap(vbmap
.bitmap
, ram_addr
,
1824 trace_vfio_get_dirty_bitmap(container
->fd
, iova
, size
, vbmap
.size
,
1825 ram_addr
, dirty_pages
);
1827 g_free(vbmap
.bitmap
);
1834 VFIOGuestIOMMU
*giommu
;
1835 } vfio_giommu_dirty_notifier
;
1837 static void vfio_iommu_map_dirty_notify(IOMMUNotifier
*n
, IOMMUTLBEntry
*iotlb
)
1839 vfio_giommu_dirty_notifier
*gdn
= container_of(n
,
1840 vfio_giommu_dirty_notifier
, n
);
1841 VFIOGuestIOMMU
*giommu
= gdn
->giommu
;
1842 VFIOContainer
*container
= giommu
->container
;
1843 hwaddr iova
= iotlb
->iova
+ giommu
->iommu_offset
;
1844 ram_addr_t translated_addr
;
1847 trace_vfio_iommu_map_dirty_notify(iova
, iova
+ iotlb
->addr_mask
);
1849 if (iotlb
->target_as
!= &address_space_memory
) {
1850 error_report("Wrong target AS \"%s\", only system memory is allowed",
1851 iotlb
->target_as
->name
? iotlb
->target_as
->name
: "none");
1856 if (vfio_get_xlat_addr(iotlb
, NULL
, &translated_addr
, NULL
)) {
1857 ret
= vfio_get_dirty_bitmap(container
, iova
, iotlb
->addr_mask
+ 1,
1860 error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx
", "
1861 "0x%"HWADDR_PRIx
") = %d (%s)",
1862 container
, iova
, iotlb
->addr_mask
+ 1, ret
,
1870 vfio_set_migration_error(ret
);
1874 static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection
*section
,
1877 const hwaddr size
= int128_get64(section
->size
);
1878 const hwaddr iova
= section
->offset_within_address_space
;
1879 const ram_addr_t ram_addr
= memory_region_get_ram_addr(section
->mr
) +
1880 section
->offset_within_region
;
1881 VFIORamDiscardListener
*vrdl
= opaque
;
1884 * Sync the whole mapped region (spanning multiple individual mappings)
1887 return vfio_get_dirty_bitmap(vrdl
->container
, iova
, size
, ram_addr
);
1890 static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer
*container
,
1891 MemoryRegionSection
*section
)
1893 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(section
->mr
);
1894 VFIORamDiscardListener
*vrdl
= NULL
;
1896 QLIST_FOREACH(vrdl
, &container
->vrdl_list
, next
) {
1897 if (vrdl
->mr
== section
->mr
&&
1898 vrdl
->offset_within_address_space
==
1899 section
->offset_within_address_space
) {
1905 hw_error("vfio: Trying to sync missing RAM discard listener");
1909 * We only want/can synchronize the bitmap for actually mapped parts -
1910 * which correspond to populated parts. Replay all populated parts.
1912 return ram_discard_manager_replay_populated(rdm
, section
,
1913 vfio_ram_discard_get_dirty_bitmap
,
1917 static int vfio_sync_dirty_bitmap(VFIOContainer
*container
,
1918 MemoryRegionSection
*section
)
1920 ram_addr_t ram_addr
;
1922 if (memory_region_is_iommu(section
->mr
)) {
1923 VFIOGuestIOMMU
*giommu
;
1925 QLIST_FOREACH(giommu
, &container
->giommu_list
, giommu_next
) {
1926 if (MEMORY_REGION(giommu
->iommu_mr
) == section
->mr
&&
1927 giommu
->n
.start
== section
->offset_within_region
) {
1929 vfio_giommu_dirty_notifier gdn
= { .giommu
= giommu
};
1930 int idx
= memory_region_iommu_attrs_to_index(giommu
->iommu_mr
,
1931 MEMTXATTRS_UNSPECIFIED
);
1933 llend
= int128_add(int128_make64(section
->offset_within_region
),
1935 llend
= int128_sub(llend
, int128_one());
1937 iommu_notifier_init(&gdn
.n
,
1938 vfio_iommu_map_dirty_notify
,
1940 section
->offset_within_region
,
1941 int128_get64(llend
),
1943 memory_region_iommu_replay(giommu
->iommu_mr
, &gdn
.n
);
1948 } else if (memory_region_has_ram_discard_manager(section
->mr
)) {
1949 return vfio_sync_ram_discard_listener_dirty_bitmap(container
, section
);
1952 ram_addr
= memory_region_get_ram_addr(section
->mr
) +
1953 section
->offset_within_region
;
1955 return vfio_get_dirty_bitmap(container
,
1956 REAL_HOST_PAGE_ALIGN(section
->offset_within_address_space
),
1957 int128_get64(section
->size
), ram_addr
);
1960 static void vfio_listener_log_sync(MemoryListener
*listener
,
1961 MemoryRegionSection
*section
)
1963 VFIOContainer
*container
= container_of(listener
, VFIOContainer
, listener
);
1966 if (vfio_listener_skipped_section(section
)) {
1970 if (vfio_devices_all_dirty_tracking(container
)) {
1971 ret
= vfio_sync_dirty_bitmap(container
, section
);
1973 error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret
,
1975 vfio_set_migration_error(ret
);
1980 static const MemoryListener vfio_memory_listener
= {
1982 .region_add
= vfio_listener_region_add
,
1983 .region_del
= vfio_listener_region_del
,
1984 .log_global_start
= vfio_listener_log_global_start
,
1985 .log_global_stop
= vfio_listener_log_global_stop
,
1986 .log_sync
= vfio_listener_log_sync
,
1989 static void vfio_listener_release(VFIOContainer
*container
)
1991 memory_listener_unregister(&container
->listener
);
1992 if (container
->iommu_type
== VFIO_SPAPR_TCE_v2_IOMMU
) {
1993 memory_listener_unregister(&container
->prereg_listener
);
1997 static struct vfio_info_cap_header
*
1998 vfio_get_cap(void *ptr
, uint32_t cap_offset
, uint16_t id
)
2000 struct vfio_info_cap_header
*hdr
;
2002 for (hdr
= ptr
+ cap_offset
; hdr
!= ptr
; hdr
= ptr
+ hdr
->next
) {
2003 if (hdr
->id
== id
) {
2011 struct vfio_info_cap_header
*
2012 vfio_get_region_info_cap(struct vfio_region_info
*info
, uint16_t id
)
2014 if (!(info
->flags
& VFIO_REGION_INFO_FLAG_CAPS
)) {
2018 return vfio_get_cap((void *)info
, info
->cap_offset
, id
);
2021 static struct vfio_info_cap_header
*
2022 vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info
*info
, uint16_t id
)
2024 if (!(info
->flags
& VFIO_IOMMU_INFO_CAPS
)) {
2028 return vfio_get_cap((void *)info
, info
->cap_offset
, id
);
2031 struct vfio_info_cap_header
*
2032 vfio_get_device_info_cap(struct vfio_device_info
*info
, uint16_t id
)
2034 if (!(info
->flags
& VFIO_DEVICE_FLAGS_CAPS
)) {
2038 return vfio_get_cap((void *)info
, info
->cap_offset
, id
);
2041 bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info
*info
,
2042 unsigned int *avail
)
2044 struct vfio_info_cap_header
*hdr
;
2045 struct vfio_iommu_type1_info_dma_avail
*cap
;
2047 /* If the capability cannot be found, assume no DMA limiting */
2048 hdr
= vfio_get_iommu_type1_info_cap(info
,
2049 VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL
);
2054 if (avail
!= NULL
) {
2056 *avail
= cap
->avail
;
2062 static int vfio_setup_region_sparse_mmaps(VFIORegion
*region
,
2063 struct vfio_region_info
*info
)
2065 struct vfio_info_cap_header
*hdr
;
2066 struct vfio_region_info_cap_sparse_mmap
*sparse
;
2069 hdr
= vfio_get_region_info_cap(info
, VFIO_REGION_INFO_CAP_SPARSE_MMAP
);
2074 sparse
= container_of(hdr
, struct vfio_region_info_cap_sparse_mmap
, header
);
2076 trace_vfio_region_sparse_mmap_header(region
->vbasedev
->name
,
2077 region
->nr
, sparse
->nr_areas
);
2079 region
->mmaps
= g_new0(VFIOMmap
, sparse
->nr_areas
);
2081 for (i
= 0, j
= 0; i
< sparse
->nr_areas
; i
++) {
2082 if (sparse
->areas
[i
].size
) {
2083 trace_vfio_region_sparse_mmap_entry(i
, sparse
->areas
[i
].offset
,
2084 sparse
->areas
[i
].offset
+
2085 sparse
->areas
[i
].size
- 1);
2086 region
->mmaps
[j
].offset
= sparse
->areas
[i
].offset
;
2087 region
->mmaps
[j
].size
= sparse
->areas
[i
].size
;
2092 region
->nr_mmaps
= j
;
2093 region
->mmaps
= g_realloc(region
->mmaps
, j
* sizeof(VFIOMmap
));
2098 int vfio_region_setup(Object
*obj
, VFIODevice
*vbasedev
, VFIORegion
*region
,
2099 int index
, const char *name
)
2101 struct vfio_region_info
*info
;
2104 ret
= vfio_get_region_info(vbasedev
, index
, &info
);
2109 region
->vbasedev
= vbasedev
;
2110 region
->flags
= info
->flags
;
2111 region
->size
= info
->size
;
2112 region
->fd_offset
= info
->offset
;
2116 region
->mem
= g_new0(MemoryRegion
, 1);
2117 memory_region_init_io(region
->mem
, obj
, &vfio_region_ops
,
2118 region
, name
, region
->size
);
2120 if (!vbasedev
->no_mmap
&&
2121 region
->flags
& VFIO_REGION_INFO_FLAG_MMAP
) {
2123 ret
= vfio_setup_region_sparse_mmaps(region
, info
);
2126 region
->nr_mmaps
= 1;
2127 region
->mmaps
= g_new0(VFIOMmap
, region
->nr_mmaps
);
2128 region
->mmaps
[0].offset
= 0;
2129 region
->mmaps
[0].size
= region
->size
;
2136 trace_vfio_region_setup(vbasedev
->name
, index
, name
,
2137 region
->flags
, region
->fd_offset
, region
->size
);
2141 static void vfio_subregion_unmap(VFIORegion
*region
, int index
)
2143 trace_vfio_region_unmap(memory_region_name(®ion
->mmaps
[index
].mem
),
2144 region
->mmaps
[index
].offset
,
2145 region
->mmaps
[index
].offset
+
2146 region
->mmaps
[index
].size
- 1);
2147 memory_region_del_subregion(region
->mem
, ®ion
->mmaps
[index
].mem
);
2148 munmap(region
->mmaps
[index
].mmap
, region
->mmaps
[index
].size
);
2149 object_unparent(OBJECT(®ion
->mmaps
[index
].mem
));
2150 region
->mmaps
[index
].mmap
= NULL
;
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}
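
/*
 * Typical region lifecycle for a caller such as vfio-pci (an illustrative
 * sketch only; "dev", "nr" and the fallback handling are placeholders, not
 * code from this file):
 *
 *     VFIORegion *region = g_new0(VFIORegion, 1);
 *
 *     if (vfio_region_setup(OBJECT(dev), vbasedev, region, nr, "name")) {
 *         ...                                   // region unavailable
 *     }
 *     if (vfio_region_mmap(region)) {
 *         ...                                   // fall back to slow access
 *     }
 *     vfio_region_mmaps_set_enabled(region, true);
 *     ...
 *     vfio_region_exit(region);                 // detach subregions
 *     vfio_region_finalize(region);             // unmap and free
 */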
void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}
void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}
void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
}
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
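
/*
 * System reset handler, registered when the first group is added and removed
 * with the last one.  It runs in two passes: every realized device first
 * works out whether it needs a reset, then the hot resets are issued, which
 * may cover several affected devices at once.
 */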
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
}
static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
}
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}
static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, even though the IOMMU subdriver always advertises
             * v1 and v2, the running platform may not support v2 and there
             * is no way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}
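
/*
 * The VFIO "info" ioctls use a growable argsz protocol: the caller passes a
 * buffer with argsz set to its size and the kernel writes back the size it
 * actually needs.  If the reported argsz exceeds what was supplied, the
 * buffer is reallocated and the ioctl retried, which is how variable-length
 * capability chains are returned.  The same pattern is used below by
 * vfio_get_device_info() and vfio_get_region_info().
 */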
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}
static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages of
     * qemu_real_host_page_size in the dirty bitmap, so dirty page tracking
     * is only enabled if the IOMMU supports that page size.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        container->dirty_pages_supported = true;
        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}
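
/*
 * Connect a group to a container within the given AddressSpace: reuse an
 * existing container if the kernel accepts the group into it, otherwise open
 * /dev/vfio/vfio, initialize a new container, probe the IOMMU properties
 * (page sizes, DMA mapping limit, dirty page tracking, DMA windows) and
 * register the memory listener that replays and tracks guest mappings.
 */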
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for
     * instance if the device is an mdev device where it is known that the
     * host vendor driver will never pin pages outside of the working set of
     * the guest driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If
     * any pages are already zapped from the virtual address space, such as
     * from previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * Note that virtio-balloon is currently only prevented from discarding
     * new memory; it does not yet set ram_block_discard_set_required() and
     * therefore neither stops us here nor deals with the sudden memory
     * consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the
     * RamDiscardManager with some IOMMU types.
     * vfio_ram_block_discard_disable() handles the details once we know
     * which type of IOMMU we are using.
     */
    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return ret;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }
    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    container->dma_max_mappings = 0;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);
    QLIST_INIT(&container->vrdl_list);
    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto free_container_exit;
    }
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        ret = vfio_get_iommu_info(container, &info);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
            goto enable_discards_exit;
        }

        if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
            container->pgsizes = info->iova_pgsizes;
        } else {
            container->pgsizes = qemu_real_host_page_size();
        }

        if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
            container->dma_max_mappings = 65535;
        }
        vfio_get_iommu_info_migration(container, info);
        g_free(info);

        /*
         * FIXME: We should parse VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE
         * information to get the actual window extent rather than assume
         * a 64-bit IOVA address space.
         */
        vfio_host_win_add(container, 0, (hwaddr)-1, container->pgsizes);

        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto enable_discards_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto enable_discards_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto enable_discards_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in a just-created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto enable_discards_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }
    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first, before unsetting the container,
     * since unsetting may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;
        VFIOHostDMAWindow *hostwin, *next;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu_mr), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
                           next) {
            QLIST_REMOVE(hostwin, hostwin_next);
            g_free(hostwin);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
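
/*
 * Look up or create the VFIOGroup for an IOMMU group id.  An already open
 * group is only reused within the same AddressSpace; otherwise
 * /dev/vfio/<groupid> is opened, checked for viability and connected to a
 * container.  The global reset handler is registered along with the first
 * group.
 */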
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
struct vfio_device_info *vfio_get_device_info(int fd)
{
    struct vfio_device_info *info;
    uint32_t argsz = sizeof(*info);

    info = g_malloc0(argsz);

retry:
    info->argsz = argsz;

    if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) {
        g_free(info);
        return NULL;
    }

    if (info->argsz > argsz) {
        argsz = info->argsz;
        info = g_realloc(info, argsz);
        goto retry;
    }

    return info;
}
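
/*
 * Obtain the device fd for a named device in the group via
 * VFIO_GROUP_GET_DEVICE_FD, query its basic info (regions, interrupts,
 * flags) and reconcile the group-wide RAM discard setting with what the
 * device driver declared it can tolerate.
 */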
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return -1;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}
void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
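
/*
 * Find a device-specific region by scanning all regions and matching the
 * VFIO_REGION_INFO_CAP_TYPE capability against the requested type/subtype.
 * On success the matching region info is returned in *info and the caller
 * is responsible for freeing it.
 */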
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}
bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}
/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}
static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}
static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);

    return container;
}
bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}
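
/*
 * Callers drive EEH through the AddressSpace wrappers above, e.g. from the
 * sPAPR PCI host bridge (an illustrative sketch only; the surrounding error
 * handling is a placeholder, not code from this file):
 *
 *     if (vfio_eeh_as_ok(&address_space_memory)) {
 *         int ret = vfio_eeh_as_op(&address_space_memory, VFIO_EEH_PE_ENABLE);
 *         if (ret < 0) {
 *             ...
 *         }
 *     }
 */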