/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"

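/*
 * Global list of VFIO groups opened by this process; entries are added in
 * vfio_get_group() and removed in vfio_put_group().
 */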
VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);

static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case
         * sections with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}

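/*
 * Unmap a range and fetch its dirty bitmap in one VFIO_IOMMU_UNMAP_DMA
 * call, so writes that land right up to the point of unmapping are still
 * reported to the migration dirty log.
 */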
static int vfio_dma_unmap_bitmap(const VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainerBase *bcontainer = &container->bcontainer;
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                iotlb->translated_addr, vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;

    if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
        if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
            bcontainer->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         *
         * The !(unmap.iova + unmap.size) test catches exactly the request
         * that reaches the top of the 64-bit address space, where the sum
         * wraps around to 0.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_legacy_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
                                    iotlb->translated_addr);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

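/*
 * Map [iova, iova + size) to the host virtual address vaddr.  Mappings are
 * created readable, and also writable unless @readonly is set.
 */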
static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                               ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY &&
         vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

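/*
 * Toggle kernel dirty page tracking for the whole container through the
 * VFIO_IOMMU_DIRTY_PAGES ioctl, using the START or STOP flag.
 */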
static int
vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
                                    bool start)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        ret = -errno;
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }

    return ret;
}

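/*
 * Read the dirty bitmap covering [iova, iova + size) into vbmap->bitmap
 * via VFIO_IOMMU_DIRTY_PAGES with the GET_BITMAP flag.
 */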
static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                          VFIOBitmap *vbmap,
                                          hwaddr iova, hwaddr size)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
    range->bitmap.pgsize = qemu_real_host_page_size();
    range->bitmap.size = vbmap->size;
    range->bitmap.data = (__u64 *)vbmap->bitmap;

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        ret = -errno;
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
    }

    g_free(dbitmap);

    return ret;
}

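/*
 * Capability chain helpers.  The kernel returns capabilities as a chain of
 * headers inside the info buffer: cap_offset points at the first header and
 * each header's 'next' field holds the offset of the following one, with 0
 * terminating the chain.
 */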
static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (!hdr) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *)hdr;
        *avail = cap->avail;
    }

    return true;
}

static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
                                     VFIOContainerBase *bcontainer)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_iova_range *cap;

    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (!hdr) {
        return false;
    }

    cap = (void *)hdr;

    for (int i = 0; i < cap->nr_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, cap->iova_ranges[i].start,
                         cap->iova_ranges[i].end);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }

    return true;
}

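/*
 * Register/unregister a group fd with the KVM VFIO pseudo device.  Failures
 * are reported but deliberately not treated as fatal here.
 */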
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, despite the IOMMU subdriver always advertising v1
             * and v2, the running platform may not support v2 and there is
             * no way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

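/*
 * Query VFIO_IOMMU_GET_INFO with a growing buffer: the kernel writes back
 * the argsz it actually needs, and the ioctl is retried until the buffer
 * is large enough to hold all capabilities.
 */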
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        memset(*info, 0, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        bcontainer->dirty_pages_supported = true;
        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    g_autofree struct vfio_iommu_type1_info *info = NULL;
    int ret;

    ret = vfio_get_iommu_info(container, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
        return ret;
    }

    if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
        bcontainer->pgsizes = info->iova_pgsizes;
    } else {
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
        bcontainer->dma_max_mappings = 65535;
    }

    vfio_get_info_iova_range(info, bcontainer);

    vfio_get_iommu_info_migration(container, info);
    return 0;
}

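/*
 * Find or create a container for @group in the address space: first try to
 * attach the group to each existing container, and only open a new
 * /dev/vfio/vfio container (and register its memory listener) when no
 * existing one accepts it.
 */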
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    VFIOContainerBase *bcontainer;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it will not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the RamDiscardManager
     * with some IOMMU types. vfio_ram_block_discard_disable() handles the
     * details once we know which type of IOMMU we are using.
     */

    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOContainer, bcontainer);
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return ret;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;
    bcontainer = &container->bcontainer;
    vfio_container_init(bcontainer, space, &vfio_legacy_ops);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        ret = vfio_legacy_setup(bcontainer, errp);
        break;
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
        ret = vfio_spapr_container_init(container, errp);
        break;
    default:
        g_assert_not_reached();
    }

    if (ret) {
        goto enable_discards_exit;
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, bcontainer, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    bcontainer->listener = vfio_memory_listener;
    memory_listener_register(&bcontainer->listener, bcontainer->space->as);

    if (bcontainer->error) {
        ret = -1;
        error_propagate_prepend(errp, bcontainer->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    bcontainer->initialized = true;

    return 0;

listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(bcontainer, next);
    vfio_kvm_device_del_group(group);
    memory_listener_unregister(&bcontainer->listener);
    if (bcontainer->ops->release) {
        bcontainer->ops->release(bcontainer);
    }

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container, since
     * unset may destroy the backend container if it's the last group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        memory_listener_unregister(&bcontainer->listener);
        if (bcontainer->ops->release) {
            bcontainer->ops->release(bcontainer);
        }
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = bcontainer->space;

        vfio_container_destroy(bcontainer);

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

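/*
 * Look up or create the VFIOGroup for @groupid, opening /dev/vfio/<groupid>
 * and connecting it to a container on first use.  A group may only ever be
 * used within a single address space.
 */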
static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->bcontainer.space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

static void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);
}

static int vfio_get_device(VFIOGroup *group, const char *name,
                           VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return -1;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}

static void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
{
    char *tmp, group_path[PATH_MAX], *group_name;
    int ret, groupid;
    ssize_t len;

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }
    return groupid;
}

/*
 * vfio_attach_device: attach a device to a security context
 * @name and @vbasedev->name are likely to be different depending
 * on the type of the device, hence the need for passing @name
 */
static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
                                     AddressSpace *as, Error **errp)
{
    int groupid = vfio_device_groupid(vbasedev, errp);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    VFIOContainerBase *bcontainer;
    int ret;

    if (groupid < 0) {
        return groupid;
    }

    trace_vfio_attach_device(vbasedev->name, groupid);

    group = vfio_get_group(groupid, as, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    bcontainer = &group->container->bcontainer;
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    return ret;
}

static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
    VFIOGroup *group = vbasedev->group;

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;
    trace_vfio_detach_device(vbasedev->name, group->groupid);
    vfio_put_base_device(vbasedev);
    vfio_put_group(group);
}

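/*
 * Hot reset the bus/slot that @vbasedev sits on.  Every affected device
 * must belong to a group we own; the group fds are collected and handed to
 * the VFIO_DEVICE_PCI_HOT_RESET ioctl.  With @single set, the reset is
 * refused if any other device would be affected.
 */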
static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info = NULL;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    bool multi = false;
    int ret, i, count;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);
    if (ret) {
        goto out_single;
    }
    devices = &info->devices[0];

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);
    if (ret) {
        ret = -errno;
    }

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? strerror(errno) : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

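/* IOMMU backend callbacks wiring the legacy type1/sPAPR container into
 * VFIOIOMMUOps. */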
const VFIOIOMMUOps vfio_legacy_ops = {
    .dma_map = vfio_legacy_dma_map,
    .dma_unmap = vfio_legacy_dma_unmap,
    .attach_device = vfio_legacy_attach_device,
    .detach_device = vfio_legacy_detach_device,
    .set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking,
    .query_dirty_bitmap = vfio_legacy_query_dirty_bitmap,
    .pci_hot_reset = vfio_legacy_pci_hot_reset,
};