vfio/container: Introduce vfio_legacy_setup() for further cleanups
[qemu/armbru.git] / hw/vfio/container.c
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"
#include "pci.h"
VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case sections
         * with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}
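
/*
 * Unmap a range and fetch its dirty bitmap in a single
 * VFIO_IOMMU_UNMAP_DMA call, so that no write occurring between a
 * separate unmap and dirty query can be lost during migration.
 */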
static int vfio_dma_unmap_bitmap(const VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainerBase *bcontainer = &container->bcontainer;
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                iotlb->translated_addr, vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;

    if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
        if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
            bcontainer->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space. Test for the error
         * condition and re-try the unmap excluding the last page. The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used. This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1. A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_legacy_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
            continue;
        }

        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
                                    iotlb->translated_addr);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
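
/* Map [iova, iova + size) to vaddr, read-write unless @readonly is set. */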
static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                               ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again. This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY &&
         vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}
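
/* Start or stop IOMMU-based dirty page tracking for the whole container. */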
static int
vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
                                    bool start)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        ret = -errno;
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }

    return ret;
}
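
/*
 * Fetch the dirty page bitmap covering [iova, iova + size) into vbmap,
 * using the GET_BITMAP action of the VFIO_IOMMU_DIRTY_PAGES ioctl.
 */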
static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                          VFIOBitmap *vbmap,
                                          hwaddr iova, hwaddr size)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
    range->bitmap.pgsize = qemu_real_host_page_size();
    range->bitmap.size = vbmap->size;
    range->bitmap.data = (__u64 *)vbmap->bitmap;

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        ret = -errno;
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
    }

    g_free(dbitmap);

    return ret;
}
static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}
bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (!hdr) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *) hdr;
        *avail = cap->avail;
    }

    return true;
}
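
/*
 * Record the usable IOVA ranges advertised by the IOMMU in the base
 * container so DMA mappings can be restricted to them.
 */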
static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
                                     VFIOContainerBase *bcontainer)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_iova_range *cap;

    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (!hdr) {
        return false;
    }

    cap = (void *)hdr;

    for (int i = 0; i < cap->nr_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, cap->iova_ranges[i].start,
                         cap->iova_ranges[i].end);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }

    return true;
}
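
/*
 * Register/unregister the group fd with the KVM-VFIO pseudo device so the
 * kernel can account for assigned devices (e.g. for DMA coherency).
 * Failures are reported but are not fatal.
 */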
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}
/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }

    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}
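
/*
 * Attach the group to the container fd and negotiate the IOMMU type:
 * VFIO_GROUP_SET_CONTAINER must succeed before VFIO_SET_IOMMU can be used.
 */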
static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1
             * and v2, the running platform may not support v2 and there is
             * no way to guess it until an IOMMU group gets added to the
             * container. So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}
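
/*
 * Query VFIO_IOMMU_GET_INFO, growing the buffer until the argsz reported
 * by the kernel fits, so the full capability chain is returned. The
 * caller frees the returned buffer.
 */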
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}
static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
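
/*
 * Probe the CAP_MIGRATION capability and record the IOMMU's dirty page
 * tracking parameters. Tracking is only usable if the host page size is
 * among the supported dirty bitmap page sizes.
 */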
static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        bcontainer->dirty_pages_supported = true;
        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}
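
/*
 * Collect the type1 IOMMU properties (supported page sizes, maximum DMA
 * mappings, usable IOVA ranges, migration capability) into the base
 * container.
 */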
static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    g_autofree struct vfio_iommu_type1_info *info = NULL;
    int ret;

    ret = vfio_get_iommu_info(container, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
        return ret;
    }

    if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
        bcontainer->pgsizes = info->iova_pgsizes;
    } else {
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
        bcontainer->dma_max_mappings = 65535;
    }

    vfio_get_info_iova_range(info, bcontainer);

    vfio_get_iommu_info_migration(container, info);
    return 0;
}
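
/*
 * Find or create a container for the group within the given AddressSpace:
 * attach to an existing container when possible, otherwise open a new
 * /dev/vfio/vfio fd, negotiate an IOMMU type, and register the memory
 * listener that mirrors guest memory mappings into the IOMMU.
 */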
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    VFIOContainerBase *bcontainer;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned. We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared. This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace. If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established. Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it does not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the RamDiscardManager
     * with some IOMMU types. vfio_ram_block_discard_disable() handles the
     * details once we know which type of IOMMU we are using.
     */

    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOContainer, bcontainer);
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return ret;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;
    bcontainer = &container->bcontainer;
    vfio_container_init(bcontainer, space, &vfio_legacy_ops);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        ret = vfio_legacy_setup(bcontainer, errp);
        break;
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
        ret = vfio_spapr_container_init(container, errp);
        break;
    default:
        g_assert_not_reached();
    }

    if (ret) {
        goto enable_discards_exit;
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, bcontainer, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    bcontainer->listener = vfio_memory_listener;
    memory_listener_register(&bcontainer->listener, bcontainer->space->as);

    if (bcontainer->error) {
        ret = -1;
        error_propagate_prepend(errp, bcontainer->error,
                                "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    bcontainer->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(bcontainer, next);
    vfio_kvm_device_del_group(group);
    memory_listener_unregister(&bcontainer->listener);
    if (bcontainer->ops->release) {
        bcontainer->ops->release(bcontainer);
    }

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
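
/*
 * Detach the group from its container, tearing the container down when
 * its last group goes away.
 */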
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first, before unsetting the container,
     * since unsetting may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        memory_listener_unregister(&bcontainer->listener);
        if (bcontainer->ops->release) {
            bcontainer->ops->release(bcontainer);
        }
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = bcontainer->space;

        vfio_container_destroy(bcontainer);

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
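
/*
 * Look up or open /dev/vfio/<groupid>, verify the group is viable, and
 * connect it to a container in the requested AddressSpace. A group can
 * only ever belong to one address space.
 */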
static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->bcontainer.space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
static void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);
}
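
/*
 * Get the device fd from the group with VFIO_GROUP_GET_DEVICE_FD and
 * populate vbasedev with the device's region/IRQ counts and flags.
 */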
static int vfio_get_device(VFIOGroup *group, const char *name,
                           VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                          "Verify all devices in group %d are bound to vfio-<bus> "
                          "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return -1;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding. Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}
static void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
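
/*
 * Resolve a device's IOMMU group number from the target of its sysfs
 * iommu_group symlink.
 */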
static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
{
    char *tmp, group_path[PATH_MAX], *group_name;
    int ret, groupid;
    ssize_t len;

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }
    return groupid;
}
/*
 * vfio_attach_device: attach a device to a security context
 * @name and @vbasedev->name are likely to be different depending
 * on the type of the device, hence the need for passing @name
 */
static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
                                     AddressSpace *as, Error **errp)
{
    int groupid = vfio_device_groupid(vbasedev, errp);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    VFIOContainerBase *bcontainer;
    int ret;

    if (groupid < 0) {
        return groupid;
    }

    trace_vfio_attach_device(vbasedev->name, groupid);

    group = vfio_get_group(groupid, as, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    bcontainer = &group->container->bcontainer;
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    return ret;
}
static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
    VFIOGroup *group = vbasedev->group;

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;
    trace_vfio_detach_device(vbasedev->name, group->groupid);
    vfio_put_base_device(vbasedev);
    vfio_put_group(group);
}
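
/*
 * Perform a PCI hot reset of the device. With @single set, fail unless
 * the device is alone in its reset domain; otherwise prepare every owned,
 * affected device and pass all required group fds to the
 * VFIO_DEVICE_PCI_HOT_RESET ioctl.
 */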
static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info = NULL;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);

    if (ret) {
        goto out_single;
    }
    devices = &info->devices[0];

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);
    if (ret) {
        ret = -errno;
    }

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? strerror(errno) : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}
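
/* IOMMU backend callbacks for the legacy VFIO group/container interface */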
const VFIOIOMMUOps vfio_legacy_ops = {
    .dma_map = vfio_legacy_dma_map,
    .dma_unmap = vfio_legacy_dma_unmap,
    .attach_device = vfio_legacy_attach_device,
    .detach_device = vfio_legacy_detach_device,
    .set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking,
    .query_dirty_bitmap = vfio_legacy_query_dirty_bitmap,
    .pci_hot_reset = vfio_legacy_pci_hot_reset,
};