/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

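/*
 * Helpers that turn VFIO_IRQ_SET_ACTION_* flags and PCI IRQ indexes into
 * human-readable strings for the error path of vfio_set_irq_signaling().
 */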
static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

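/*
 * Wire up (fd >= 0) or tear down (fd < 0) an eventfd for the given interrupt
 * index/subindex via VFIO_DEVICE_SET_IRQS, building a descriptive error on
 * failure.
 */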
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

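/*
 * Read from a device region; the device data is little endian and is
 * converted to host endianness before being returned.
 */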
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Device state interfaces
 */

bool vfio_mig_active(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_group_list)) {
        return false;
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration_blocker) {
                return false;
            }
        }
    }
    return true;
}

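/*
 * Return true only while migration is active and every device in the
 * container is in the _SAVING state; devices with pre-copy dirty page
 * tracking disabled must additionally have left the _RUNNING state.
 */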
static bool vfio_devices_all_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if (migration->device_state & VFIO_DEVICE_STATE_SAVING) {
                if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF)
                    && (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                    return false;
                }
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_running_and_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
                (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}

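/*
 * Unmap an IOVA range and, in the same VFIO_IOMMU_UNMAP_DMA call, fetch the
 * dirty bitmap for it so the pages can be fed into migration dirty tracking.
 */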
static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS;
    int ret;

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap_pgsize to
     * TARGET_PAGE_SIZE.
     */

    bitmap->pgsize = TARGET_PAGE_SIZE;
    bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                   BITS_PER_BYTE;

    if (bitmap->size > container->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
                     (uint64_t)bitmap->size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    bitmap->data = g_try_malloc0(bitmap->size);
    if (!bitmap->data) {
        ret = -ENOMEM;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
                iotlb->translated_addr, pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

    g_free(bitmap->data);
unmap_exit:
    g_free(unmap);
    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size,
                          IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (iotlb && container->dirty_pages_supported &&
        vfio_devices_all_running_and_saving(container)) {
        return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}

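/*
 * Map a host virtual address range into the container at the given IOVA,
 * with the requested read/write permission.
 */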
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

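/*
 * Host DMA windows describe the IOVA ranges (and their supported page sizes)
 * that the host IOMMU can map for this container.
 */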
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}

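/*
 * vIOMMU notifier: propagate guest IOMMU map/unmap events into the host
 * container via vfio_dma_map()/vfio_dma_unmap().
 */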
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

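/*
 * MemoryListener callback: DMA-map a newly added RAM section, or register an
 * IOMMU notifier when the section belongs to a vIOMMU memory region.
 */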
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;
    Error *err = NULL;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                error_setg(&err,
                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing"
                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                    section->offset_within_address_space,
                    section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                    hostwin->min_iova, hostwin->max_iova);
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            error_setg_errno(&err, -ret, "Failed to create SPAPR window");
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_setg(&err, "Container %p can't map guest IOVA region"
                   " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_iommu_set_page_size_mask(giommu->iommu,
                                                     container->pgsizes,
                                                     &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
                   container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            error_propagate_prepend(&container->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

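/*
 * MemoryListener callback: tear down the mappings and IOMMU notifiers created
 * by vfio_listener_region_add() when a section goes away.
 */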
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        if (int128_eq(llsize, int128_2_64())) {
            /* The unmap ioctl doesn't accept a full 64-bit span. */
            llsize = int128_rshift(llsize, 1);
            ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
            if (ret) {
                error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                             "0x%"HWADDR_PRIx") = %d (%m)",
                             container, iova, int128_get64(llsize), ret);
            }
            iova += int128_get64(llsize);
        }
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

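/*
 * Query the container's dirty bitmap for [iova, iova + size) and mark the
 * corresponding guest RAM pages dirty starting at ram_addr.
 */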
static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                 uint64_t size, ram_addr_t ram_addr)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    uint64_t pages;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to
     * TARGET_PAGE_SIZE.
     */
    range->bitmap.pgsize = TARGET_PAGE_SIZE;

    pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS;
    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                         BITS_PER_BYTE;
    range->bitmap.data = g_try_malloc0(range->bitmap.size);
    if (!range->bitmap.data) {
        ret = -ENOMEM;
        goto err_out;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                (uint64_t)range->size, errno);
        goto err_out;
    }

    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
                                            ram_addr, pages);

    trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
                                range->bitmap.size, ram_addr);
err_out:
    g_free(range->bitmap.data);
    g_free(dbitmap);

    return ret;
}

typedef struct {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} vfio_giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    vfio_giommu_dirty_notifier *gdn = container_of(n,
                                                vfio_giommu_dirty_notifier, n);
    VFIOGuestIOMMU *giommu = gdn->giommu;
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    ram_addr_t translated_addr;

    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();
    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
        int ret;

        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
                                    translated_addr);
        if (ret) {
            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
    rcu_read_unlock();
}

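/*
 * Sync dirty pages for one section: replay the vIOMMU mappings through a
 * dirty notifier for IOMMU sections, or query the IOVA range directly for
 * plain RAM sections.
 */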
static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                  MemoryRegionSection *section)
{
    ram_addr_t ram_addr;

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                Int128 llend;
                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
                                                       MEMTXATTRS_UNSPECIFIED);

                llend = int128_add(int128_make64(section->offset_within_region),
                                   section->size);
                llend = int128_sub(llend, int128_one());

                iommu_notifier_init(&gdn.n,
                                    vfio_iommu_map_dirty_notify,
                                    IOMMU_NOTIFIER_MAP,
                                    section->offset_within_region,
                                    int128_get64(llend),
                                    idx);
                memory_region_iommu_replay(giommu->iommu, &gdn.n);
                break;
            }
        }
        return 0;
    }

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(container,
                   TARGET_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr);
}

static void vfio_listerner_log_sync(MemoryListener *listener,
        MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    if (vfio_listener_skipped_section(section) ||
        !container->dirty_pages_supported) {
        return;
    }

    if (vfio_devices_all_saving(container)) {
        vfio_sync_dirty_bitmap(container, section);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_sync = vfio_listerner_log_sync,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

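/*
 * Capability chain helpers: walk the vfio_info_cap_header list embedded in
 * the various VFIO info structures and return the requested capability.
 */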
static struct vfio_info_cap_header *
vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
{
    struct vfio_info_cap_header *hdr;

    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

struct vfio_info_cap_header *
vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (hdr == NULL) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *) hdr;
        *avail = cap->avail;
    }

    return true;
}

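/*
 * Build the region's mmap list from the sparse mmap capability, skipping
 * zero-sized areas.
 */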
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, despite the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the container.
             * So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

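/*
 * Fetch VFIO_IOMMU_GET_INFO, growing the buffer until the kernel-reported
 * argsz fits.
 */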
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{

    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if (((*info)->argsz > argsz)) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                            header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
     * TARGET_PAGE_SIZE to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) {
        container->dirty_pages_supported = true;
        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

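/*
 * Attach a group to an existing container in this address space when
 * possible; otherwise open /dev/vfio/vfio, initialize a new container for
 * the detected IOMMU type and register its memory listener(s).
 */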
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * Especially virtio-balloon is currently only prevented from discarding
     * new memory, it will not yet set ram_block_discard_set_required() and
     * therefore, neither stops us here or deals with the sudden memory
     * consumption of inflated memory.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        return ret;
    }

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        ret = vfio_get_iommu_info(container, &info);

        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info->iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
        container->pgsizes = info->iova_pgsizes;

        if (!ret) {
            vfio_get_iommu_info_migration(container, info);
        }
        g_free(info);
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    ram_block_discard_disable(false);
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

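/*
 * Look up or create the VFIOGroup for /dev/vfio/<groupid> and connect it to
 * a container in the given address space.
 */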
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        ram_block_discard_disable(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

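/*
 * Obtain the device fd from the group and fill in the VFIODevice fields from
 * VFIO_DEVICE_GET_INFO.
 */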
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            ram_block_discard_disable(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}