/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"
#include "pci.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);

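/*
 * Disable or re-enable RAM block discarding for @container, choosing
 * between coordinated (RamDiscardManager) and uncoordinated discards
 * based on the IOMMU type in use; see the per-case comments below.
 */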
static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case
         * sections with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}

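/*
 * Unmap a DMA range and fetch the dirty bitmap for it in the same
 * VFIO_IOMMU_UNMAP_DMA ioctl (GET_DIRTY_BITMAP flag), so pages the device
 * dirtied up to the point of unmap are reported to the migration dirty log.
 */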
static int vfio_dma_unmap_bitmap(const VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainerBase *bcontainer = &container->bcontainer;
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                iotlb->translated_addr, vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;

    if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
        if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
            bcontainer->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space. Test for the error
         * condition and re-try the unmap excluding the last page. The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used. This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1. A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_legacy_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
                                    iotlb->translated_addr);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

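/*
 * Map [iova, iova + size) to host virtual address @vaddr through the type1
 * VFIO_IOMMU_MAP_DMA interface, retrying once after an unmap if the kernel
 * reports EBUSY.
 */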
static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                               ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again. This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY &&
         vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

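/*
 * Start or stop IOMMU-driver-based dirty page tracking for the whole
 * container via VFIO_IOMMU_DIRTY_PAGES.
 */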
static int
vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
                                    bool start)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        ret = -errno;
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }

    return ret;
}

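/*
 * Read back the dirty bitmap for [iova, iova + size) into @vbmap without
 * stopping tracking.
 */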
static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                          VFIOBitmap *vbmap,
                                          hwaddr iova, hwaddr size)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
    range->bitmap.pgsize = qemu_real_host_page_size();
    range->bitmap.size = vbmap->size;
    range->bitmap.data = (__u64 *)vbmap->bitmap;

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        ret = -errno;
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
    }

    g_free(dbitmap);

    return ret;
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

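/*
 * Returns true and fills @avail with the number of DMA mappings still
 * available if the kernel exposes the DMA_AVAIL capability, false otherwise.
 */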
bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (!hdr) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *)hdr;
        *avail = cap->avail;
    }

    return true;
}

static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
                                     VFIOContainerBase *bcontainer)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_iova_range *cap;

    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (!hdr) {
        return false;
    }

    cap = (void *)hdr;

    for (int i = 0; i < cap->nr_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, cap->iova_ranges[i].start,
                         cap->iova_ranges[i].end);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }

    return true;
}

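/*
 * Register/unregister a group fd with the KVM VFIO pseudo-device. Failures
 * are only reported, not treated as fatal.
 */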
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

/*
 * vfio_get_iommu_class - get a VFIOIOMMUClass associated with a type
 */
static const VFIOIOMMUClass *vfio_get_iommu_class(int iommu_type, Error **errp)
{
    ObjectClass *klass = NULL;

    switch (iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        klass = object_class_by_name(TYPE_VFIO_IOMMU_LEGACY);
        break;
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
        klass = object_class_by_name(TYPE_VFIO_IOMMU_SPAPR);
        break;
    default:
        g_assert_not_reached();
    }

    return VFIO_IOMMU_CLASS(klass);
}

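/*
 * Pick an IOMMU type, attach the group to the container fd, enable the
 * IOMMU, and initialize the common container state with the matching
 * VFIOIOMMUClass.
 */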
static int vfio_init_container(VFIOContainer *container, int group_fd,
                               VFIOAddressSpace *space, Error **errp)
{
    int iommu_type, ret;
    const VFIOIOMMUClass *vioc;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, despite the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the
             * container. So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;

    vioc = vfio_get_iommu_class(iommu_type, errp);
    if (!vioc) {
        error_setg(errp, "No available IOMMU models");
        return -EINVAL;
    }

    vfio_container_init(&container->bcontainer, space, vioc);
    return 0;
}

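/*
 * Query VFIO_IOMMU_GET_INFO, growing the buffer until the kernel's reported
 * argsz fits, so that the trailing capability chain is captured as well.
 */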
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if (((*info)->argsz > argsz)) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        bcontainer->dirty_pages_supported = true;
        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

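/*
 * Per-container setup for the legacy backend: cache supported page sizes,
 * the maximum number of DMA mappings, usable IOVA ranges and migration
 * (dirty tracking) capabilities from the IOMMU info.
 */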
static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    g_autofree struct vfio_iommu_type1_info *info = NULL;
    int ret;

    ret = vfio_get_iommu_info(container, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
        return ret;
    }

    if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
        bcontainer->pgsizes = info->iova_pgsizes;
    } else {
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
        bcontainer->dma_max_mappings = 65535;
    }

    vfio_get_info_iova_range(info, bcontainer);

    vfio_get_iommu_info_migration(container, info);
    return 0;
}

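/*
 * Attach @group to an existing container in @as if the kernel accepts the
 * pairing, otherwise open /dev/vfio/vfio and build a new container around it.
 */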
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    VFIOContainerBase *bcontainer;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned. We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared. This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace. If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established. Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * Especially virtio-balloon is currently only prevented from discarding
     * new memory, it will not yet set ram_block_discard_set_required() and
     * therefore, neither stops us here or deals with the sudden memory
     * consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the RamDiscardManager
     * with some IOMMU types. vfio_ram_block_discard_disable() handles the
     * details once we know which type of IOMMU we are using.
     */

    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOContainer, bcontainer);
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return ret;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;
    bcontainer = &container->bcontainer;

    ret = vfio_init_container(container, group->fd, space, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto free_container_exit;
    }

    assert(bcontainer->ops->setup);

    ret = bcontainer->ops->setup(bcontainer, errp);
    if (ret) {
        goto enable_discards_exit;
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, bcontainer, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    bcontainer->listener = vfio_memory_listener;
    memory_listener_register(&bcontainer->listener, bcontainer->space->as);

    if (bcontainer->error) {
        ret = -1;
        error_propagate_prepend(errp, bcontainer->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    bcontainer->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(bcontainer, next);
    vfio_kvm_device_del_group(group);
    memory_listener_unregister(&bcontainer->listener);
    if (bcontainer->ops->release) {
        bcontainer->ops->release(bcontainer);
    }

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

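/*
 * Detach @group from its container, tearing the container down when the
 * last group is removed.
 */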
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        memory_listener_unregister(&bcontainer->listener);
        if (bcontainer->ops->release) {
            bcontainer->ops->release(bcontainer);
        }
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = bcontainer->space;

        vfio_container_destroy(bcontainer);

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

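/*
 * Look up or create the VFIOGroup for @groupid and connect it to a
 * container in @as. A group may only be used in a single address space.
 */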
static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->bcontainer.space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

static void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);
}

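/*
 * Obtain a device fd from the group and populate the common VFIODevice
 * state (region/IRQ counts, flags, reset capability).
 */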
static int vfio_get_device(VFIOGroup *group, const char *name,
                           VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return -1;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding. Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}

static void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

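/*
 * Resolve the IOMMU group number for @vbasedev by following the device's
 * sysfs iommu_group symlink.
 */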
static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
{
    char *tmp, group_path[PATH_MAX], *group_name;
    int ret, groupid;
    ssize_t len;

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }
    return groupid;
}

/*
 * vfio_attach_device: attach a device to a security context
 * @name and @vbasedev->name are likely to be different depending
 * on the type of the device, hence the need for passing @name
 */
static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
                                     AddressSpace *as, Error **errp)
{
    int groupid = vfio_device_groupid(vbasedev, errp);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    VFIOContainerBase *bcontainer;
    int ret;

    if (groupid < 0) {
        return groupid;
    }

    trace_vfio_attach_device(vbasedev->name, groupid);

    group = vfio_get_group(groupid, as, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    bcontainer = &group->container->bcontainer;
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    return ret;
}

static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
    VFIOGroup *group = vbasedev->group;

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;
    trace_vfio_detach_device(vbasedev->name, group->groupid);
    vfio_put_base_device(vbasedev);
    vfio_put_group(group);
}

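/*
 * Legacy implementation of the pci_hot_reset callback: collect the hot
 * reset dependency list, verify every affected group is owned (or that a
 * PM reset is available), quiesce the dependent devices, pass the group
 * fds to VFIO_DEVICE_PCI_HOT_RESET, and restore INTx afterwards.
 */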
static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info = NULL;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    bool multi = false;
    int ret, i, count;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);

    if (ret) {
        goto out_single;
    }
    devices = &info->devices[0];

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);
    if (ret) {
        ret = -errno;
    }

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? strerror(errno) : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

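/* Wire the legacy container implementation into the VFIOIOMMUClass hooks. */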
static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->setup = vfio_legacy_setup;
    vioc->dma_map = vfio_legacy_dma_map;
    vioc->dma_unmap = vfio_legacy_dma_unmap;
    vioc->attach_device = vfio_legacy_attach_device;
    vioc->detach_device = vfio_legacy_detach_device;
    vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking;
    vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap;
    vioc->pci_hot_reset = vfio_legacy_pci_hot_reset;
}

static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU_LEGACY,
        .parent = TYPE_VFIO_IOMMU,
        .class_init = vfio_iommu_legacy_class_init,
    },
};

DEFINE_TYPES(types)