/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"
VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case
         * sections with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}
static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > container->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                iotlb->translated_addr, vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
int vfio_dma_unmap(VFIOContainer *container, hwaddr iova,
                   ram_addr_t size, IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;

    if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
        if (!vfio_devices_all_device_dirty_tracking(container) &&
            container->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space. Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_get_dirty_bitmap(container, iova, size,
                                    iotlb->translated_addr);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                 ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}
int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
{
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (!container->dirty_pages_supported) {
        return 0;
    }

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        ret = -errno;
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }

    return ret;
}
int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
                            hwaddr iova, hwaddr size)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
    range->bitmap.pgsize = qemu_real_host_page_size();
    range->bitmap.size = vbmap->size;
    range->bitmap.data = (__u64 *)vbmap->bitmap;

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        ret = -errno;
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
    }

    g_free(dbitmap);

    return ret;
}
static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}
bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (!hdr) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *)hdr;
        *avail = cap->avail;
    }

    return true;
}
static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
                                     VFIOContainer *container)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_iova_range *cap;

    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (!hdr) {
        return false;
    }

    cap = (void *)hdr;

    for (int i = 0; i < cap->nr_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, cap->iova_ranges[i].start,
                         cap->iova_ranges[i].end);
        container->iova_ranges =
            range_list_insert(container->iova_ranges, range);
    }

    return true;
}
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}
static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}
/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}
static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}
static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        container->dirty_pages_supported = true;
        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}
static void vfio_free_container(VFIOContainer *container)
{
    g_list_free_full(container->iova_ranges, g_free);
    g_free(container);
}
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned. We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared. This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace. If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established. Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it will not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the RamDiscardManager
     * with some IOMMU types. vfio_ram_block_discard_disable() handles the
     * details once we know which type of IOMMU we are using.
     */

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return ret;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    container->dma_max_mappings = 0;
    container->iova_ranges = NULL;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->vrdl_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        ret = vfio_get_iommu_info(container, &info);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
            goto enable_discards_exit;
        }

        if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
            container->pgsizes = info->iova_pgsizes;
        } else {
            container->pgsizes = qemu_real_host_page_size();
        }

        if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
            container->dma_max_mappings = 65535;
        }

        vfio_get_info_iova_range(info, container);

        vfio_get_iommu_info_migration(container, info);
        g_free(info);
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        ret = vfio_spapr_container_init(container, errp);
        if (ret) {
            goto enable_discards_exit;
        }
        break;
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;

listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU ||
        container->iommu_type == VFIO_SPAPR_TCE_IOMMU) {
        vfio_spapr_container_deinit(container);
    }

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

free_container_exit:
    vfio_free_container(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        memory_listener_unregister(&container->listener);
        if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU ||
            container->iommu_type == VFIO_SPAPR_TCE_IOMMU) {
            vfio_spapr_container_deinit(container);
        }
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu_mr), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        vfio_free_container(container);

        vfio_put_address_space(space);
    }
}
static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
static void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);
}
static int vfio_get_device(VFIOGroup *group, const char *name,
                           VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return -1;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}
static void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
{
    char *tmp, group_path[PATH_MAX], *group_name;
    int ret, groupid;
    ssize_t len;

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }
    return groupid;
}
/*
 * vfio_attach_device: attach a device to a security context
 * @name and @vbasedev->name are likely to be different depending
 * on the type of the device, hence the need for passing @name
 */
int vfio_attach_device(char *name, VFIODevice *vbasedev,
                       AddressSpace *as, Error **errp)
{
    int groupid = vfio_device_groupid(vbasedev, errp);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    VFIOContainer *container;
    int ret;

    if (groupid < 0) {
        return groupid;
    }

    trace_vfio_attach_device(vbasedev->name, groupid);

    group = vfio_get_group(groupid, as, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    container = group->container;
    vbasedev->container = container;
    QLIST_INSERT_HEAD(&container->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    return ret;
}
void vfio_detach_device(VFIODevice *vbasedev)
{
    VFIOGroup *group = vbasedev->group;

    if (!vbasedev->container) {
        return;
    }

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->container = NULL;
    trace_vfio_detach_device(vbasedev->name, group->groupid);
    vfio_put_base_device(vbasedev);
    vfio_put_group(group);
}