/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"
VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}
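
/*
 * vfio_set_irq_signaling() below wraps VFIO_DEVICE_SET_IRQS for the common
 * case of wiring a single eventfd to one (sub)index: the variable-length
 * vfio_irq_set carries the header plus a single int32_t file descriptor as
 * payload, and passing fd = -1 tears the signaling down again.  A typical
 * caller (shown only as an illustrative sketch, not a quote of real caller
 * code) might do:
 *
 *     if (vfio_set_irq_signaling(vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
 *                                VFIO_IRQ_SET_ACTION_TRIGGER,
 *                                event_notifier_get_fd(&notifier), &err)) {
 *         error_report_err(err);
 *     }
 */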
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
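
/*
 * Note: .valid/.impl above restrict accesses to 1..8 bytes; the memory core
 * checks guest accesses against .valid and breaks them down to .impl-sized
 * chunks before they reach vfio_region_read()/vfio_region_write(), so the
 * switch statements above only ever see these sizes.
 */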
/*
 * Device state interfaces
 */
static bool vfio_devices_all_stopped_and_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
                !(migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}
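
/*
 * Host DMA windows track the IOVA ranges (and supported page sizes) that the
 * host IOMMU can actually map.  Type1 containers register a single window
 * covering the whole address space; sPAPR containers add and remove windows
 * dynamically as guest memory regions appear and disappear.
 */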
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}
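
/*
 * The MemoryListener callbacks below filter the address space down to
 * sections that are actually DMA targets: plain RAM and vIOMMU regions are
 * accepted, everything else (and the spurious top-of-address-space mappings
 * described in the comment inside the helper) is skipped.
 */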
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}
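
/*
 * vfio_iommu_map_notify() is the IOMMUNotifier callback registered for each
 * vIOMMU-backed section in vfio_listener_region_add().  Guest (un)mapping
 * events arrive as IOMMUTLBEntry updates and are forwarded to the host IOMMU
 * via vfio_dma_map()/vfio_dma_unmap() after translating the target back to a
 * QEMU virtual address with vfio_get_vaddr().
 */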
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
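
/*
 * region_add/region_del below are where guest memory actually becomes (or
 * stops being) DMA-able for the device: RAM sections are pinned and mapped
 * with vfio_dma_map(), while vIOMMU sections instead register the notifier
 * above and replay any existing mappings.
 */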
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;
    Error *err = NULL;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                error_setg(&err,
                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                    section->offset_within_address_space,
                    section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                    hostwin->min_iova, hostwin->max_iova);
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            error_setg_errno(&err, -ret, "Failed to create SPAPR window");
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_setg(&err, "Container %p can't map guest IOVA region"
                   " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
                   container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            error_propagate_prepend(&container->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}
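
/*
 * Dirty page tracking for migration: VFIO_IOMMU_DIRTY_PAGES returns a bitmap
 * with one bit per TARGET_PAGE_SIZE page of the requested IOVA range, which
 * is then folded into QEMU's RAM dirty bitmap via
 * cpu_physical_memory_set_dirty_lebitmap().
 */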
static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                 uint64_t size, ram_addr_t ram_addr)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    uint64_t pages;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to
     * TARGET_PAGE_SIZE.
     */
    range->bitmap.pgsize = TARGET_PAGE_SIZE;

    pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS;
    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                         BITS_PER_BYTE;
    range->bitmap.data = g_try_malloc0(range->bitmap.size);
    if (!range->bitmap.data) {
        ret = -ENOMEM;
        goto err_out;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                (uint64_t)range->size, errno);
        goto err_out;
    }

    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
                                            ram_addr, pages);

    trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
                                range->bitmap.size, ram_addr);
err_out:
    g_free(range->bitmap.data);
    g_free(dbitmap);

    return ret;
}
static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                  MemoryRegionSection *section)
{
    ram_addr_t ram_addr;

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(container,
                   TARGET_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr);
}

static void vfio_listerner_log_sync(MemoryListener *listener,
        MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    if (vfio_listener_skipped_section(section) ||
        !container->dirty_pages_supported) {
        return;
    }

    if (vfio_devices_all_stopped_and_saving(container)) {
        vfio_sync_dirty_bitmap(container, section);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_sync = vfio_listerner_log_sync,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}
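
/*
 * VFIO info structures can carry a chain of capabilities: cap_offset points
 * at the first vfio_info_cap_header and each header's 'next' field is an
 * offset from the start of the info buffer (0 terminates the chain, which is
 * why the loops below stop when hdr wraps back to ptr).
 */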
struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}
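
/*
 * Map each (sparse) mmap-able area of the region and overlay it on
 * region->mem as a RAM-device subregion, so that guest accesses hit the
 * mmap directly instead of trapping into vfio_region_ops.
 */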
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}
void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
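
/*
 * Group registration with the KVM VFIO pseudo device (see the comment next
 * to vfio_kvm_device_fd above): the device is created lazily on the first
 * group and kept for the life of the VM.
 */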
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, despite the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if (((*info)->argsz > argsz)) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
     * TARGET_PAGE_SIZE to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) {
        container->dirty_pages_supported = true;
        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}
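
/*
 * vfio_connect_container() attaches a group to a container in the given
 * AddressSpace: reuse an existing container if the kernel accepts
 * VFIO_GROUP_SET_CONTAINER, otherwise open /dev/vfio/vfio, pick an IOMMU
 * type, set up host DMA windows and dirty-tracking info, and register the
 * MemoryListener that populates the container's mappings.
 */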
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * Especially virtio-balloon is currently only prevented from discarding
     * new memory, it will not yet set ram_block_discard_set_required() and
     * therefore, neither stops us here or deals with the sudden memory
     * consumption of inflated memory.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        return ret;
    }

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        ret = vfio_get_iommu_info(container, &info);

        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info->iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
        container->pgsizes = info->iova_pgsizes;

        if (!ret) {
            vfio_get_iommu_info_migration(container, info);
        }
        g_free(info);
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    ram_block_discard_disable(false);
    vfio_put_address_space(space);

    return ret;
}
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        ram_block_discard_disable(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
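
/*
 * vfio_get_device() obtains the device fd from the group and caches the
 * basic vfio_device_info (region/irq counts, reset capability) in the
 * VFIODevice.  Device-type code (vfio-pci and friends) is then expected to
 * enumerate regions with vfio_region_setup() and interrupts with
 * vfio_set_irq_signaling().
 */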
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            ram_block_discard_disable(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}
void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}
/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5), the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);

    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}