/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/balloon.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
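
/*
 * Note: this fd is created lazily by vfio_kvm_device_add_group() below, via
 * KVM_CREATE_DEVICE, the first time a group is added while KVM is enabled.
 */
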
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
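
/*
 * Note: the three helpers above all issue the same VFIO_DEVICE_SET_IRQS
 * ioctl with a DATA_NONE payload and differ only in the ACTION flag
 * (trigger/unmask/mask); the ioctl return value is not checked here.
 */
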
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
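
/*
 * Note: every access through vfio_region_ops ends up as a pread()/pwrite()
 * at region->fd_offset + addr; the memory core splits guest accesses into
 * the 1-8 byte sizes declared in .impl above before calling these handlers.
 */
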
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
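            /*
             * Note: !(unmap.iova + unmap.size) above detects a range whose
             * end wraps to exactly 0, i.e. one ending at the very top of the
             * 64-bit IOVA space, which is the case the buggy kernel check
             * rejects; trimming one host IOMMU page and retrying avoids it.
             */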
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }

        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}
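
/*
 * Note: each VFIOHostDMAWindow recorded above describes one IOVA range the
 * host IOMMU can map, plus its supported page sizes; region_add() below
 * checks guest sections against this list before attempting a DMA mapping.
 */
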
static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);

        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(section->mr, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);
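
        /*
         * Note: memory_region_iommu_replay() walks the IOMMU region and
         * invokes the notifier just registered for existing translations, so
         * mappings already present in the vIOMMU are propagated into the
         * container right away.
         */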
        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
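        /*
         * Note: a RAM device section that is not aligned to the host IOMMU
         * page size was never mapped by region_add() above, so in that case
         * the unmap below is skipped.
         */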
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
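
    /*
     * Note: at this point region->mmaps holds only the non-empty sparse
     * areas; any hole in the region keeps going through the slow
     * pread()/pwrite() path in vfio_region_ops.
     */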

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
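
        /*
         * Note: each mmap'able area is overlaid on the parent I/O region as a
         * RAM device subregion, so guest accesses that hit it bypass the
         * pread()/pwrite() path in vfio_region_ops entirely.
         */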
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with memory ballooning insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore add a balloon inhibit for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * ballooning, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be ballooning candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from a
     * previous ballooning opt-in, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * NB. Balloon inhibiting does not currently block operation of the
     * balloon driver or revoke previously pinned pages, it only prevents
     * calling madvise to modify the virtual mapping of ballooned pages.
     */
    qemu_balloon_inhibit(true);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
        container->pgsizes = info.iova_pgsizes;
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            container->iommu_type = VFIO_SPAPR_TCE_IOMMU;
            v2 = false;
            ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        }
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);
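
    /*
     * Note: registering the listener replays the current memory map through
     * vfio_listener_region_add(); since a MemoryListener callback cannot
     * return an error, any failure during that replay is stashed in
     * container->error and checked below.
     */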

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;

listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    qemu_balloon_inhibit(false);
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->balloon_allowed) {
        qemu_balloon_inhibit(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Clear the balloon inhibitor for this group if the driver knows the
     * device operates compatibly with ballooning.  Setting must be consistent
     * per group, but since compatibility is really only possible with mdev
     * currently, we expect singleton groups.
     */
    if (vbasedev->balloon_allowed != group->balloon_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp,
                       "Inconsistent device balloon setting within group");
            close(fd);
            return -1;
        }

        if (!group->balloon_allowed) {
            group->balloon_allowed = true;
            qemu_balloon_inhibit(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
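        /*
         * Note: the kernel reports the size it actually needs in argsz (e.g.
         * when capability chains are present), so grow the buffer and query
         * again.
         */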

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /* We don't yet have logic to synchronize EEH state across
         * multiple containers */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);

    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}