vfio/iommufd: Remove CONFIG_IOMMUFD usage
[qemu/ar7.git] / hw/vfio/common.c
blob 0d4d8b8416c6a4770677e1ebe5e1fc7dbaaef004
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/pci.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"
#include "sysemu/tpm.h"

VFIODeviceList vfio_device_list =
    QLIST_HEAD_INITIALIZER(vfio_device_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
int vfio_kvm_device_fd = -1;
#endif

/*
 * Device state interfaces
 */

bool vfio_mig_active(void)
{
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_device_list)) {
        return false;
    }

    QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
        if (vbasedev->migration_blocker) {
            return false;
        }
    }
    return true;
}

static Error *multiple_devices_migration_blocker;

/*
 * Multiple devices migration is allowed only if all devices support P2P
 * migration. Single device migration is allowed regardless of P2P migration
 * support.
 */
static bool vfio_multiple_devices_migration_is_supported(void)
{
    VFIODevice *vbasedev;
    unsigned int device_num = 0;
    bool all_support_p2p = true;

    QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
        if (vbasedev->migration) {
            device_num++;

            if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
                all_support_p2p = false;
            }
        }
    }

    return all_support_p2p || device_num <= 1;
}

int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
{
    int ret;

    if (vfio_multiple_devices_migration_is_supported()) {
        return 0;
    }

    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
        error_setg(errp, "Multiple VFIO devices migration is supported only if "
                         "all of them support P2P migration");
        return -EINVAL;
    }

    if (multiple_devices_migration_blocker) {
        return 0;
    }

    error_setg(&multiple_devices_migration_blocker,
               "Multiple VFIO devices migration is supported only if all of "
               "them support P2P migration");
    ret = migrate_add_blocker(&multiple_devices_migration_blocker, errp);

    return ret;
}

void vfio_unblock_multiple_devices_migration(void)
{
    if (!multiple_devices_migration_blocker ||
        !vfio_multiple_devices_migration_is_supported()) {
        return;
    }

    migrate_del_blocker(&multiple_devices_migration_blocker);
}

bool vfio_viommu_preset(VFIODevice *vbasedev)
{
    return vbasedev->bcontainer->space->as != &address_space_memory;
}

static void vfio_set_migration_error(int err)
{
    MigrationState *ms = migrate_get_current();

    if (migration_is_setup_or_active(ms->state)) {
        WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
            if (ms->to_dst_file) {
                qemu_file_set_error(ms->to_dst_file, err);
            }
        }
    }
}

bool vfio_device_state_is_running(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
           migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P;
}

bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ||
           migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
}

static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer)
{
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (ms->state != MIGRATION_STATUS_ACTIVE &&
        ms->state != MIGRATION_STATUS_DEVICE) {
        return false;
    }

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        VFIOMigration *migration = vbasedev->migration;

        if (!migration) {
            return false;
        }

        if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
            (vfio_device_state_is_running(vbasedev) ||
             vfio_device_state_is_precopy(vbasedev))) {
            return false;
        }
    }
    return true;
}

bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer)
{
    VFIODevice *vbasedev;

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (!vbasedev->dirty_pages_supported) {
            return false;
        }
    }

    return true;
}

/*
 * Check if all VFIO devices are running and migration is active, which is
 * essentially equivalent to the migration being in pre-copy phase.
 */
bool
vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer)
{
    VFIODevice *vbasedev;

    if (!migration_is_active(migrate_get_current())) {
        return false;
    }

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        VFIOMigration *migration = vbasedev->migration;

        if (!migration) {
            return false;
        }

        if (vfio_device_state_is_running(vbasedev) ||
            vfio_device_state_is_precopy(vbasedev)) {
            continue;
        } else {
            return false;
        }
    }
    return true;
}

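/*
 * Memory listener helpers: the callbacks below translate QEMU memory
 * topology updates (MemoryRegionSection add/del) into VFIO DMA map and
 * unmap requests against the container backend.
 */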
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           memory_region_is_protected(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    bool ret, mr_has_discard_manager;

    ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
                               &mr_has_discard_manager);
    if (ret && mr_has_discard_manager) {
        /*
         * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
         * pages will remain pinned inside vfio until unmapped, resulting in a
         * higher memory consumption than expected. If memory would get
         * populated again later, there would be an inconsistency between pages
         * pinned by vfio and pages seen by QEMU. This is the case until
         * unmapped from the IOMMU (e.g., during device reset).
         *
         * With malicious guests, we really only care about pinning more memory
         * than expected. RLIMIT_MEMLOCK set for the user/process can never be
         * exceeded and can be used to mitigate this problem.
         */
        warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
                         " RAM (e.g., virtio-mem) works, however, malicious"
                         " guests can trigger pinning of more memory than"
                         " intended via an IOMMU. It's possible to mitigate "
                         " by setting/adjusting RLIMIT_MEMLOCK.");
    }
    return ret;
}

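/*
 * vIOMMU TLB notifier: invoked on guest IOMMU MAP/UNMAP events. The guest
 * IOVA is offset into the container's address space and, for MAP events,
 * the translated host virtual address is mapped through the container.
 */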
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainerBase *bcontainer = giommu->bcontainer;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        vfio_set_migration_error(-EINVAL);
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_container_dma_map(bcontainer, iova,
                                     iotlb->addr_mask + 1, vaddr,
                                     read_only);
        if (ret) {
            error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%s)",
                         bcontainer, iova,
                         iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
        }
    } else {
        ret = vfio_container_dma_unmap(bcontainer, iova,
                                       iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%s)",
                         bcontainer, iova,
                         iotlb->addr_mask + 1, ret, strerror(-ret));
            vfio_set_migration_error(ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    VFIOContainerBase *bcontainer = vrdl->bcontainer;
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    int ret;

    /* Unmap with a single call. */
    ret = vfio_container_dma_unmap(bcontainer, iova, size, NULL);
    if (ret) {
        error_report("%s: vfio_container_dma_unmap() failed: %s", __func__,
                     strerror(-ret));
    }
}

static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    VFIOContainerBase *bcontainer = vrdl->bcontainer;
    const hwaddr end = section->offset_within_region +
                       int128_get64(section->size);
    hwaddr start, next, iova;
    void *vaddr;
    int ret;

    /*
     * Map in (aligned within memory region) minimum granularity, so we can
     * unmap in minimum granularity later.
     */
    for (start = section->offset_within_region; start < end; start = next) {
        next = ROUND_UP(start + 1, vrdl->granularity);
        next = MIN(next, end);

        iova = start - section->offset_within_region +
               section->offset_within_address_space;
        vaddr = memory_region_get_ram_ptr(section->mr) + start;

        ret = vfio_container_dma_map(bcontainer, iova, next - start,
                                     vaddr, section->readonly);
        if (ret) {
            /* Rollback */
            vfio_ram_discard_notify_discard(rdl, section);
            return ret;
        }
    }
    return 0;
}

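/*
 * RamDiscardManager-backed regions (e.g. virtio-mem) are mapped per
 * populated block at the manager's minimum granularity, so individual
 * blocks can later be unmapped again when they are discarded.
 */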
static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer,
                                               MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl;

    /* Ignore some corner cases not relevant in practice. */
    g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
                             TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));

    vrdl = g_new0(VFIORamDiscardListener, 1);
    vrdl->bcontainer = bcontainer;
    vrdl->mr = section->mr;
    vrdl->offset_within_address_space = section->offset_within_address_space;
    vrdl->size = int128_get64(section->size);
    vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
                                                                section->mr);

    g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
    g_assert(bcontainer->pgsizes &&
             vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes));

    ram_discard_listener_init(&vrdl->listener,
                              vfio_ram_discard_notify_populate,
                              vfio_ram_discard_notify_discard, true);
    ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
    QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next);

    /*
     * Sanity-check if we have a theoretically problematic setup where we could
     * exceed the maximum number of possible DMA mappings over time. We assume
     * that each mapped section in the same address space as a RamDiscardManager
     * section consumes exactly one DMA mapping, with the exception of
     * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
     * in the same address space as RamDiscardManager sections.
     *
     * We assume that each section in the address space consumes one memslot.
     * We take the number of KVM memory slots as a best guess for the maximum
     * number of sections in the address space we could have over time,
     * also consuming DMA mappings.
     */
    if (bcontainer->dma_max_mappings) {
        unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;

#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            max_memslots = kvm_get_max_memslots();
        }
#endif

        QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
            hwaddr start, end;

            start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
                                    vrdl->granularity);
            end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
                           vrdl->granularity);
            vrdl_mappings += (end - start) / vrdl->granularity;
            vrdl_count++;
        }

        if (vrdl_mappings + max_memslots - vrdl_count >
            bcontainer->dma_max_mappings) {
            warn_report("%s: possibly running out of DMA mappings. E.g., try"
                        " increasing the 'block-size' of virtio-mem devices."
                        " Maximum possible DMA mappings: %d, Maximum possible"
                        " memslots: %d", __func__, bcontainer->dma_max_mappings,
                        max_memslots);
        }
    }
}

static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer,
                                                 MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to unregister missing RAM discard listener");
    }

    ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
    QLIST_REMOVE(vrdl, next);
    g_free(vrdl);
}

static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!TPM_IS_CRB(mr->owner)) {
        return false;
    }

    /* this is a known safe misaligned region, just trace for debug purpose */
    trace_vfio_known_safe_misalignment(memory_region_name(mr),
                                       section->offset_within_address_space,
                                       section->offset_within_region,
                                       qemu_real_host_page_size());
    return true;
}

static bool vfio_listener_valid_section(MemoryRegionSection *section,
                                        const char *name)
{
    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_skip(name,
                                        section->offset_within_address_space,
                                        section->offset_within_address_space +
                                        int128_get64(int128_sub(section->size,
                                                                int128_one())));
        return false;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask()) !=
                 (section->offset_within_region &
                  ~qemu_real_host_page_mask()))) {
        if (!vfio_known_safe_misalignment(section)) {
            error_report("%s received unaligned region %s iova=0x%"PRIx64
                         " offset_within_region=0x%"PRIx64
                         " qemu_real_host_page_size=0x%"PRIxPTR,
                         __func__, memory_region_name(section->mr),
                         section->offset_within_address_space,
                         section->offset_within_region,
                         qemu_real_host_page_size());
        }
        return false;
    }

    return true;
}

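/*
 * Compute the host-page-aligned IOVA window covered by a section; returns
 * false when alignment leaves nothing to map.
 */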
static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer,
                                        MemoryRegionSection *section,
                                        hwaddr *out_iova, hwaddr *out_end,
                                        Int128 *out_llend)
{
    Int128 llend;
    hwaddr iova;

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));

    if (int128_ge(int128_make64(iova), llend)) {
        return false;
    }

    *out_iova = iova;
    *out_end = int128_get64(int128_sub(llend, int128_one()));
    if (out_llend) {
        *out_llend = llend;
    }
    return true;
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    Error *err = NULL;

    if (!vfio_listener_valid_section(section, "region_add")) {
        return;
    }

    if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
                                     &llend)) {
        if (memory_region_is_ram_device(section->mr)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                qemu_real_host_page_size());
        }
        return;
    }

    if (vfio_container_add_section_window(bcontainer, section, &err)) {
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu_mr = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->bcontainer = bcontainer;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr,
                                                     bcontainer->pgsizes,
                                                     &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }

        if (bcontainer->iova_ranges) {
            ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr,
                                                      bcontainer->iova_ranges,
                                                      &err);
            if (ret) {
                g_free(giommu);
                goto fail;
            }
        }

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    /*
     * For RAM memory regions with a RamDiscardManager, we only want to map the
     * actually populated parts - and update the mapping whenever we're notified
     * about changes.
     */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_register_ram_discard_listener(bcontainer, section);
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize),
                                 vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%s)",
                   bcontainer, iova, int128_get64(llsize), vaddr, ret,
                   strerror(-ret));
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_reportf_err(err, "PCI p2p may not work: ");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!bcontainer->initialized) {
        if (!bcontainer->error) {
            error_propagate_prepend(&bcontainer->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (!vfio_listener_valid_section(section, "region_del")) {
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
                                     &llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;

        pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_unregister_ram_discard_listener(bcontainer, section);
        /* Unregistering will trigger an unmap. */
        try_unmap = false;
    }

    if (try_unmap) {
        if (int128_eq(llsize, int128_2_64())) {
            /* The unmap ioctl doesn't accept a full 64-bit span. */
            llsize = int128_rshift(llsize, 1);
            ret = vfio_container_dma_unmap(bcontainer, iova,
                                           int128_get64(llsize), NULL);
            if (ret) {
                error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                             "0x%"HWADDR_PRIx") = %d (%s)",
                             bcontainer, iova, int128_get64(llsize), ret,
                             strerror(-ret));
            }
            iova += int128_get64(llsize);
        }
        ret = vfio_container_dma_unmap(bcontainer, iova,
                                       int128_get64(llsize), NULL);
        if (ret) {
            error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%s)",
                         bcontainer, iova, int128_get64(llsize), ret,
                         strerror(-ret));
        }
    }

    memory_region_unref(section->mr);

    vfio_container_del_section_window(bcontainer, section);
}

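/*
 * Dirty page tracking: device DMA logging hardware typically supports only
 * a small number of tracking ranges, so the address space is condensed into
 * up to three ranges (32-bit, 64-bit, and the PCI 64-bit hole), collected
 * by the transient memory listener below.
 */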
typedef struct VFIODirtyRanges {
    hwaddr min32;
    hwaddr max32;
    hwaddr min64;
    hwaddr max64;
    hwaddr minpci64;
    hwaddr maxpci64;
} VFIODirtyRanges;

typedef struct VFIODirtyRangesListener {
    VFIOContainerBase *bcontainer;
    VFIODirtyRanges ranges;
    MemoryListener listener;
} VFIODirtyRangesListener;

static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
                                     VFIOContainerBase *bcontainer)
{
    VFIOPCIDevice *pcidev;
    VFIODevice *vbasedev;
    Object *owner;

    owner = memory_region_owner(section->mr);

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
            continue;
        }
        pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
        if (OBJECT(pcidev) == owner) {
            return true;
        }
    }

    return false;
}

static void vfio_dirty_tracking_update(MemoryListener *listener,
                                       MemoryRegionSection *section)
{
    VFIODirtyRangesListener *dirty = container_of(listener,
                                                  VFIODirtyRangesListener,
                                                  listener);
    VFIODirtyRanges *range = &dirty->ranges;
    hwaddr iova, end, *min, *max;

    if (!vfio_listener_valid_section(section, "tracking_update") ||
        !vfio_get_section_iova_range(dirty->bcontainer, section,
                                     &iova, &end, NULL)) {
        return;
    }

    /*
     * The address space passed to the dirty tracker is reduced to three ranges:
     * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the
     * PCI 64-bit hole.
     *
     * The underlying reports of dirty will query a sub-interval of each of
     * these ranges.
     *
     * The purpose of the three range handling is to handle known cases of big
     * holes in the address space, like the x86 AMD 1T hole, and firmware (like
     * OVMF) which may relocate the pci-hole64 to the end of the address space.
     * The latter would otherwise generate large ranges for tracking, stressing
     * the limits of supported hardware. The pci-hole32 will always be below 4G
     * (overlapping or not) so it doesn't need special handling and is part of
     * the 32-bit range.
     *
     * The alternative would be an IOVATree but that has a much bigger runtime
     * overhead and unnecessary complexity.
     */
    if (vfio_section_is_vfio_pci(section, dirty->bcontainer) &&
        iova >= UINT32_MAX) {
        min = &range->minpci64;
        max = &range->maxpci64;
    } else {
        min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
        max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
    }
    if (*min > iova) {
        *min = iova;
    }
    if (*max < end) {
        *max = end;
    }

    trace_vfio_device_dirty_tracking_update(iova, end, *min, *max);
    return;
}

static const MemoryListener vfio_dirty_tracking_listener = {
    .name = "vfio-tracking",
    .region_add = vfio_dirty_tracking_update,
};

static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer,
                                     VFIODirtyRanges *ranges)
{
    VFIODirtyRangesListener dirty;

    memset(&dirty, 0, sizeof(dirty));
    dirty.ranges.min32 = UINT32_MAX;
    dirty.ranges.min64 = UINT64_MAX;
    dirty.ranges.minpci64 = UINT64_MAX;
    dirty.listener = vfio_dirty_tracking_listener;
    dirty.bcontainer = bcontainer;

    memory_listener_register(&dirty.listener,
                             bcontainer->space->as);

    *ranges = dirty.ranges;

    /*
     * The memory listener is synchronous, and used to calculate the range
     * to dirty tracking. Unregister it after we are done as we are not
     * interested in any follow-up updates.
     */
    memory_listener_unregister(&dirty.listener);
}

static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    VFIODevice *vbasedev;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (!vbasedev->dirty_tracking) {
            continue;
        }

        if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
            warn_report("%s: Failed to stop DMA logging, err %d (%s)",
                        vbasedev->name, -errno, strerror(errno));
        }
        vbasedev->dirty_tracking = false;
    }
}

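/*
 * Builds the VFIO_DEVICE_FEATURE_DMA_LOGGING_START payload: a
 * vfio_device_feature header followed by a dma_logging_control whose
 * 'ranges' field points to a separately allocated array with one entry
 * per non-empty tracking range (32-bit, 64-bit, PCI 64-bit hole).
 */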
static struct vfio_device_feature *
vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer,
                                             VFIODirtyRanges *tracking)
{
    struct vfio_device_feature *feature;
    size_t feature_size;
    struct vfio_device_feature_dma_logging_control *control;
    struct vfio_device_feature_dma_logging_range *ranges;

    feature_size = sizeof(struct vfio_device_feature) +
                   sizeof(struct vfio_device_feature_dma_logging_control);
    feature = g_try_malloc0(feature_size);
    if (!feature) {
        errno = ENOMEM;
        return NULL;
    }
    feature->argsz = feature_size;
    feature->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_START;

    control = (struct vfio_device_feature_dma_logging_control *)feature->data;
    control->page_size = qemu_real_host_page_size();

    /*
     * DMA logging uAPI guarantees to support at least a number of ranges that
     * fits into a single host kernel base page.
     */
    control->num_ranges = !!tracking->max32 + !!tracking->max64 +
                          !!tracking->maxpci64;
    ranges = g_try_new0(struct vfio_device_feature_dma_logging_range,
                        control->num_ranges);
    if (!ranges) {
        g_free(feature);
        errno = ENOMEM;

        return NULL;
    }

    control->ranges = (__u64)(uintptr_t)ranges;
    if (tracking->max32) {
        ranges->iova = tracking->min32;
        ranges->length = (tracking->max32 - tracking->min32) + 1;
        ranges++;
    }
    if (tracking->max64) {
        ranges->iova = tracking->min64;
        ranges->length = (tracking->max64 - tracking->min64) + 1;
        ranges++;
    }
    if (tracking->maxpci64) {
        ranges->iova = tracking->minpci64;
        ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1;
    }

    trace_vfio_device_dirty_tracking_start(control->num_ranges,
                                           tracking->min32, tracking->max32,
                                           tracking->min64, tracking->max64,
                                           tracking->minpci64,
                                           tracking->maxpci64);

    return feature;
}

static void vfio_device_feature_dma_logging_start_destroy(
    struct vfio_device_feature *feature)
{
    struct vfio_device_feature_dma_logging_control *control =
        (struct vfio_device_feature_dma_logging_control *)feature->data;
    struct vfio_device_feature_dma_logging_range *ranges =
        (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges;

    g_free(ranges);
    g_free(feature);
}

static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer)
{
    struct vfio_device_feature *feature;
    VFIODirtyRanges ranges;
    VFIODevice *vbasedev;
    int ret = 0;

    vfio_dirty_tracking_init(bcontainer, &ranges);
    feature = vfio_device_feature_dma_logging_start_create(bcontainer,
                                                           &ranges);
    if (!feature) {
        return -errno;
    }

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (vbasedev->dirty_tracking) {
            continue;
        }

        ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
        if (ret) {
            ret = -errno;
            error_report("%s: Failed to start DMA logging, err %d (%s)",
                         vbasedev->name, ret, strerror(errno));
            goto out;
        }
        vbasedev->dirty_tracking = true;
    }

out:
    if (ret) {
        vfio_devices_dma_logging_stop(bcontainer);
    }

    vfio_device_feature_dma_logging_start_destroy(feature);

    return ret;
}

static void vfio_listener_log_global_start(MemoryListener *listener)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    int ret;

    if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
        ret = vfio_devices_dma_logging_start(bcontainer);
    } else {
        ret = vfio_container_set_dirty_page_tracking(bcontainer, true);
    }

    if (ret) {
        error_report("vfio: Could not start dirty page tracking, err: %d (%s)",
                     ret, strerror(-ret));
        vfio_set_migration_error(ret);
    }
}

static void vfio_listener_log_global_stop(MemoryListener *listener)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    int ret = 0;

    if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
        vfio_devices_dma_logging_stop(bcontainer);
    } else {
        ret = vfio_container_set_dirty_page_tracking(bcontainer, false);
    }

    if (ret) {
        error_report("vfio: Could not stop dirty page tracking, err: %d (%s)",
                     ret, strerror(-ret));
        vfio_set_migration_error(ret);
    }
}

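/*
 * Query one device's DMA logging report for [iova, iova + size), filling
 * the caller-provided bitmap with one bit per host page.
 */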
static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
                                          hwaddr size, void *bitmap)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                        sizeof(struct vfio_device_feature_dma_logging_report),
                        sizeof(__u64))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_dma_logging_report *report =
        (struct vfio_device_feature_dma_logging_report *)feature->data;

    report->iova = iova;
    report->length = size;
    report->page_size = qemu_real_host_page_size();
    report->bitmap = (__u64)(uintptr_t)bitmap;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_GET |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        return -errno;
    }

    return 0;
}

int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                    VFIOBitmap *vbmap, hwaddr iova,
                                    hwaddr size)
{
    VFIODevice *vbasedev;
    int ret;

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        ret = vfio_device_dma_logging_report(vbasedev, iova, size,
                                             vbmap->bitmap);
        if (ret) {
            error_report("%s: Failed to get DMA logging report, iova: "
                         "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
                         ", err: %d (%s)",
                         vbasedev->name, iova, size, ret, strerror(-ret));

            return ret;
        }
    }

    return 0;
}

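/*
 * Fetch the dirty bitmap for [iova, iova + size) and mark the matching
 * pages dirty in QEMU's RAM dirty bitmap. Without any tracking support,
 * conservatively mark the whole range dirty.
 */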
int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
                          uint64_t size, ram_addr_t ram_addr)
{
    bool all_device_dirty_tracking =
        vfio_devices_all_device_dirty_tracking(bcontainer);
    uint64_t dirty_pages;
    VFIOBitmap vbmap;
    int ret;

    if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
        cpu_physical_memory_set_dirty_range(ram_addr, size,
                                            tcg_enabled() ? DIRTY_CLIENTS_ALL :
                                            DIRTY_CLIENTS_NOCODE);
        return 0;
    }

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    if (all_device_dirty_tracking) {
        ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size);
    } else {
        ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size);
    }

    if (ret) {
        goto out;
    }

    dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
                                                         vbmap.pages);

    trace_vfio_get_dirty_bitmap(iova, size, vbmap.size, ram_addr, dirty_pages);
out:
    g_free(vbmap.bitmap);

    return ret;
}

typedef struct {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} vfio_giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    vfio_giommu_dirty_notifier *gdn = container_of(n,
                                                   vfio_giommu_dirty_notifier,
                                                   n);
    VFIOGuestIOMMU *giommu = gdn->giommu;
    VFIOContainerBase *bcontainer = giommu->bcontainer;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    ram_addr_t translated_addr;
    int ret = -EINVAL;

    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        goto out;
    }

    rcu_read_lock();
    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
        ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
                                    translated_addr);
        if (ret) {
            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%s)",
                         bcontainer, iova, iotlb->addr_mask + 1, ret,
                         strerror(-ret));
        }
    }
    rcu_read_unlock();

out:
    if (ret) {
        vfio_set_migration_error(ret);
    }
}

static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
                                             void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
                                section->offset_within_region;
    VFIORamDiscardListener *vrdl = opaque;

    /*
     * Sync the whole mapped region (spanning multiple individual mappings)
     * in one go.
     */
    return vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr);
}

static int
vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer,
                                            MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to sync missing RAM discard listener");
    }

    /*
     * We only want/can synchronize the bitmap for actually mapped parts -
     * which correspond to populated parts. Replay all populated parts.
     * The callback expects the listener itself as its opaque pointer.
     */
    return ram_discard_manager_replay_populated(rdm, section,
                                                vfio_ram_discard_get_dirty_bitmap,
                                                vrdl);
}

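/*
 * Dispatch dirty bitmap sync by section type: replay vIOMMU mappings,
 * replay populated RamDiscardManager parts, or query plain RAM directly.
 */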
static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
                                  MemoryRegionSection *section)
{
    ram_addr_t ram_addr;

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                Int128 llend;
                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
                int idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);

                llend = int128_add(int128_make64(section->offset_within_region),
                                   section->size);
                llend = int128_sub(llend, int128_one());

                iommu_notifier_init(&gdn.n,
                                    vfio_iommu_map_dirty_notify,
                                    IOMMU_NOTIFIER_MAP,
                                    section->offset_within_region,
                                    int128_get64(llend),
                                    idx);
                memory_region_iommu_replay(giommu->iommu_mr, &gdn.n);
                break;
            }
        }
        return 0;
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        return vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
    }

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(bcontainer,
                   REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr);
}

static void vfio_listener_log_sync(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    int ret;

    if (vfio_listener_skipped_section(section)) {
        return;
    }

    if (vfio_devices_all_dirty_tracking(bcontainer)) {
        ret = vfio_sync_dirty_bitmap(bcontainer, section);
        if (ret) {
            error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret,
                         strerror(-ret));
            vfio_set_migration_error(ret);
        }
    }
}

const MemoryListener vfio_memory_listener = {
    .name = "vfio",
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_global_start = vfio_listener_log_global_start,
    .log_global_stop = vfio_listener_log_global_stop,
    .log_sync = vfio_listener_log_sync,
};

void vfio_reset_handler(void *opaque)
{
    VFIODevice *vbasedev;

    QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
        if (vbasedev->dev->realized) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
        if (vbasedev->dev->realized && vbasedev->needs_reset) {
            vbasedev->ops->vfio_hot_reset_multi(vbasedev);
        }
    }
}

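/*
 * The KVM VFIO pseudo device is created lazily on first use and kept for
 * the VM's lifetime (see vfio_kvm_device_fd above); device fds are then
 * registered with it via KVM_DEV_VFIO_FILE_ADD/DEL.
 */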
int vfio_kvm_device_add_fd(int fd, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_FILE,
        .attr = KVM_DEV_VFIO_FILE_ADD,
        .addr = (uint64_t)(unsigned long)&fd,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_setg_errno(errp, errno, "Failed to create KVM VFIO device");
            return -errno;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_setg_errno(errp, errno, "Failed to add fd %d to KVM VFIO device",
                         fd);
        return -errno;
    }
#endif
    return 0;
}

int vfio_kvm_device_del_fd(int fd, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_FILE,
        .attr = KVM_DEV_VFIO_FILE_DEL,
        .addr = (uint64_t)(unsigned long)&fd,
    };

    if (vfio_kvm_device_fd < 0) {
        error_setg(errp, "KVM VFIO device isn't created yet");
        return -EINVAL;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_setg_errno(errp, errno,
                         "Failed to remove fd %d from KVM VFIO device", fd);
        return -errno;
    }
#endif
    return 0;
}

VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    if (QLIST_EMPTY(&vfio_address_spaces)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (!QLIST_EMPTY(&space->containers)) {
        return;
    }

    QLIST_REMOVE(space, list);
    g_free(space);

    if (QLIST_EMPTY(&vfio_address_spaces)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

struct vfio_device_info *vfio_get_device_info(int fd)
{
    struct vfio_device_info *info;
    uint32_t argsz = sizeof(*info);

    info = g_malloc0(argsz);

retry:
    info->argsz = argsz;

    if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) {
        g_free(info);
        return NULL;
    }

    if (info->argsz > argsz) {
        argsz = info->argsz;
        info = g_realloc(info, argsz);
        goto retry;
    }

    return info;
}

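/*
 * The IOMMU backend class is resolved by name at runtime rather than via
 * CONFIG_IOMMUFD #ifdefs: legacy VFIO is the default, and the iommufd
 * backend is selected when the device was created with an iommufd object.
 */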
int vfio_attach_device(char *name, VFIODevice *vbasedev,
                       AddressSpace *as, Error **errp)
{
    const VFIOIOMMUClass *ops =
        VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_LEGACY));

    if (vbasedev->iommufd) {
        ops = VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD));
    }

    assert(ops);

    return ops->attach_device(name, vbasedev, as, errp);
}

void vfio_detach_device(VFIODevice *vbasedev)
{
    if (!vbasedev->bcontainer) {
        return;
    }
    vbasedev->bcontainer->ops->detach_device(vbasedev);
}