/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "pci.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);

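/*
 * Disable (state == true) or re-enable (state == false) discarding of RAM
 * for this container, using the coordinated variant when the IOMMU type is
 * known to work with the RamDiscardManager.
 */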
static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case sections
         * with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}

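/*
 * Unmap a DMA range and retrieve its dirty bitmap in a single
 * VFIO_IOMMU_UNMAP_DMA call, so the dirty state of the pages being
 * unmapped is captured atomically.
 */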
static int vfio_dma_unmap_bitmap(const VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainerBase *bcontainer = &container->bcontainer;
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                iotlb->translated_addr, vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;
    Error *local_err = NULL;

    if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
        if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
            bcontainer->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_legacy_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
                                    iotlb->translated_addr, &local_err);
        if (ret) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

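/*
 * Map host virtual address @vaddr at guest IOVA @iova for @size bytes,
 * read-only when @readonly is set.
 */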
static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                               ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY &&
         vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

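/* Start or stop kernel-side dirty page tracking for the whole container. */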
static int
vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
                                    bool start, Error **errp)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        ret = -errno;
        error_setg_errno(errp, errno, "Failed to set dirty tracking flag 0x%x",
                         dirty.flags);
    }

    return ret;
}

static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                      VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
    range->bitmap.pgsize = qemu_real_host_page_size();
    range->bitmap.size = vbmap->size;
    range->bitmap.data = (__u64 *)vbmap->bitmap;

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        ret = -errno;
        error_setg_errno(errp, errno,
                         "Failed to get dirty bitmap for iova: 0x%"PRIx64
                         " size: 0x%"PRIx64, (uint64_t)range->iova,
                         (uint64_t)range->size);
    }

    g_free(dbitmap);

    return ret;
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (!hdr) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *)hdr;
        *avail = cap->avail;
    }

    return true;
}

static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
                                     VFIOContainerBase *bcontainer)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_iova_range *cap;

    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (!hdr) {
        return false;
    }

    cap = (void *)hdr;

    for (int i = 0; i < cap->nr_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, cap->iova_ranges[i].start,
                         cap->iova_ranges[i].end);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }

    return true;
}

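/*
 * Register/unregister the group fd with the KVM-VFIO pseudo device;
 * failures are only reported, they do not fail the caller.
 */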
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(int container_fd,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container_fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

/*
 * vfio_get_iommu_class_name - get the QOM class name associated with a type
 */
static const char *vfio_get_iommu_class_name(int iommu_type)
{
    switch (iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        return TYPE_VFIO_IOMMU_LEGACY;
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
        return TYPE_VFIO_IOMMU_SPAPR;
    default:
        g_assert_not_reached();
    }
}

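/*
 * Attach the group to the container fd and select the IOMMU model,
 * falling back from sPAPR TCE v2 to v1 when the platform rejects v2.
 */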
static bool vfio_set_iommu(int container_fd, int group_fd,
                           int *iommu_type, Error **errp)
{
    if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd)) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return false;
    }

    while (ioctl(container_fd, VFIO_SET_IOMMU, *iommu_type)) {
        if (*iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the container.
             * So in case it fails with v2, try v1 as a fallback.
             */
            *iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return false;
    }

    return true;
}

static VFIOContainer *vfio_create_container(int fd, VFIOGroup *group,
                                            Error **errp)
{
    int iommu_type;
    const char *vioc_name;
    VFIOContainer *container;

    iommu_type = vfio_get_iommu_type(fd, errp);
    if (iommu_type < 0) {
        return NULL;
    }

    if (!vfio_set_iommu(fd, group->fd, &iommu_type, errp)) {
        return NULL;
    }

    vioc_name = vfio_get_iommu_class_name(iommu_type);

    container = VFIO_IOMMU_LEGACY(object_new(vioc_name));
    container->fd = fd;
    container->iommu_type = iommu_type;
    return container;
}

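/*
 * Query VFIO_IOMMU_GET_INFO, growing the buffer until it fits the argsz
 * reported back by the kernel.
 */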
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        bcontainer->dirty_pages_supported = true;
        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

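/*
 * Cache the IOMMU properties (page sizes, DMA mapping limit, usable IOVA
 * ranges, migration capabilities) in the base container.
 */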
static bool vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    g_autofree struct vfio_iommu_type1_info *info = NULL;
    int ret;

    ret = vfio_get_iommu_info(container, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
        return false;
    }

    if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
        bcontainer->pgsizes = info->iova_pgsizes;
    } else {
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
        bcontainer->dma_max_mappings = 65535;
    }

    vfio_get_info_iova_range(info, bcontainer);

    vfio_get_iommu_info_migration(container, info);
    return true;
}

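/*
 * Join an existing container in this address space when the kernel lets
 * the group attach; otherwise open /dev/vfio/vfio and build a new one.
 */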
static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                   Error **errp)
{
    VFIOContainer *container;
    VFIOContainerBase *bcontainer;
    int ret, fd;
    VFIOAddressSpace *space;
    VFIOIOMMUClass *vioc;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it does not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the RamDiscardManager
     * with some IOMMU types. vfio_ram_block_discard_disable() handles the
     * details once we know which type of IOMMU we are using.
     */

    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOContainer, bcontainer);
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return false;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return true;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp);
    if (fd < 0) {
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        goto close_fd_exit;
    }

    container = vfio_create_container(fd, group, errp);
    if (!container) {
        goto close_fd_exit;
    }
    bcontainer = &container->bcontainer;

    if (!vfio_cpr_register_container(bcontainer, errp)) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto unregister_container_exit;
    }

    vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
    assert(vioc->setup);

    if (!vioc->setup(bcontainer, errp)) {
        goto enable_discards_exit;
    }

    vfio_kvm_device_add_group(group);

    vfio_address_space_insert(space, bcontainer);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    bcontainer->listener = vfio_memory_listener;
    memory_listener_register(&bcontainer->listener, bcontainer->space->as);

    if (bcontainer->error) {
        error_propagate_prepend(errp, bcontainer->error,
                                "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    bcontainer->initialized = true;

    return true;

listener_release_exit:
    QLIST_REMOVE(group, container_next);
    vfio_kvm_device_del_group(group);
    memory_listener_unregister(&bcontainer->listener);
    if (vioc->release) {
        vioc->release(bcontainer);
    }

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

unregister_container_exit:
    vfio_cpr_unregister_container(bcontainer);

free_container_exit:
    object_unref(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return false;
}

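/*
 * Detach the group from its container, tearing the container down once
 * the last group is gone.
 */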
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;
    VFIOContainerBase *bcontainer = &container->bcontainer;
    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        memory_listener_unregister(&bcontainer->listener);
        if (vioc->release) {
            vioc->release(bcontainer);
        }
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = bcontainer->space;

        trace_vfio_disconnect_container(container->fd);
        vfio_cpr_unregister_container(bcontainer);
        close(container->fd);
        object_unref(container);

        vfio_put_address_space(space);
    }
}

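/*
 * Look up or create the VFIOGroup for @groupid; a group may only be used
 * in a single address space.
 */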
static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->bcontainer.space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR, errp);
    if (group->fd < 0) {
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (!vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

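/*
 * Release a group once its device list is empty: undo the discard
 * disabling if needed, disconnect from the container and free it.
 */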
static void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);
}

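/*
 * Obtain the device fd via VFIO_GROUP_GET_DEVICE_FD and populate
 * @vbasedev from the kernel's device info.
 */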
static bool vfio_get_device(VFIOGroup *group, const char *name,
                            VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                          "Verify all devices in group %d are bound to vfio-<bus> "
                          "or pci-stub and not already in use\n", group->groupid);
        return false;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return false;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return false;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return true;
}

static void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

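/* Derive the IOMMU group number from the device's sysfs iommu_group link. */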
static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
{
    char *tmp, group_path[PATH_MAX];
    g_autofree char *group_name = NULL;
    int ret, groupid;
    ssize_t len;

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = g_path_get_basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }
    return groupid;
}

/*
 * vfio_attach_device: attach a device to a security context
 * @name and @vbasedev->name are likely to be different depending
 * on the type of the device, hence the need for passing @name
 */
static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
                                      AddressSpace *as, Error **errp)
{
    int groupid = vfio_device_groupid(vbasedev, errp);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    VFIOContainerBase *bcontainer;

    if (groupid < 0) {
        return false;
    }

    trace_vfio_attach_device(vbasedev->name, groupid);

    if (!vfio_device_hiod_realize(vbasedev, errp)) {
        return false;
    }

    group = vfio_get_group(groupid, as, errp);
    if (!group) {
        return false;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return false;
        }
    }
    if (!vfio_get_device(group, name, vbasedev, errp)) {
        vfio_put_group(group);
        return false;
    }

    bcontainer = &group->container->bcontainer;
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    return true;
}

static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
    VFIOGroup *group = vbasedev->group;

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;
    trace_vfio_detach_device(vbasedev->name, group->groupid);
    vfio_put_base_device(vbasedev);
    vfio_put_group(group);
}

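/*
 * Perform a PCI hot reset, gathering the fds of every owned group the
 * reset affects.  With @single set, fail with -EINVAL instead of
 * resetting other realized devices on the same bus.
 */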
static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info = NULL;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    bool multi = false;
    int ret, i, count;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);

    if (ret) {
        goto out_single;
    }
    devices = &info->devices[0];

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);
    if (ret) {
        ret = -errno;
    }

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? strerror(errno) : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }

out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->hiod_typename = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO;

    vioc->setup = vfio_legacy_setup;
    vioc->dma_map = vfio_legacy_dma_map;
    vioc->dma_unmap = vfio_legacy_dma_unmap;
    vioc->attach_device = vfio_legacy_attach_device;
    vioc->detach_device = vfio_legacy_detach_device;
    vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking;
    vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap;
    vioc->pci_hot_reset = vfio_legacy_pci_hot_reset;
};

static bool hiod_legacy_vfio_realize(HostIOMMUDevice *hiod, void *opaque,
                                     Error **errp)
{
    VFIODevice *vdev = opaque;

    hiod->name = g_strdup(vdev->name);
    hiod->agent = opaque;

    return true;
}

static int hiod_legacy_vfio_get_cap(HostIOMMUDevice *hiod, int cap,
                                    Error **errp)
{
    switch (cap) {
    case HOST_IOMMU_DEVICE_CAP_AW_BITS:
        return vfio_device_get_aw_bits(hiod->agent);
    default:
        error_setg(errp, "%s: unsupported capability %x", hiod->name, cap);
        return -EINVAL;
    }
}

static GList *
hiod_legacy_vfio_get_iova_ranges(HostIOMMUDevice *hiod)
{
    VFIODevice *vdev = hiod->agent;

    g_assert(vdev);
    return vfio_container_get_iova_ranges(vdev->bcontainer);
}

static uint64_t
hiod_legacy_vfio_get_page_size_mask(HostIOMMUDevice *hiod)
{
    VFIODevice *vdev = hiod->agent;

    g_assert(vdev);
    return vfio_container_get_page_size_mask(vdev->bcontainer);
}

static void vfio_iommu_legacy_instance_init(Object *obj)
{
    VFIOContainer *container = VFIO_IOMMU_LEGACY(obj);

    QLIST_INIT(&container->group_list);
}

static void hiod_legacy_vfio_class_init(ObjectClass *oc, void *data)
{
    HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc);

    hioc->realize = hiod_legacy_vfio_realize;
    hioc->get_cap = hiod_legacy_vfio_get_cap;
    hioc->get_iova_ranges = hiod_legacy_vfio_get_iova_ranges;
    hioc->get_page_size_mask = hiod_legacy_vfio_get_page_size_mask;
};

static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU_LEGACY,
        .parent = TYPE_VFIO_IOMMU,
        .instance_init = vfio_iommu_legacy_instance_init,
        .instance_size = sizeof(VFIOContainer),
        .class_init = vfio_iommu_legacy_class_init,
    }, {
        .name = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO,
        .parent = TYPE_HOST_IOMMU_DEVICE,
        .class_init = hiod_legacy_vfio_class_init,
    }
};

DEFINE_TYPES(types)