/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

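/*
 * Associate an eventfd with one interrupt of a device: @index/@subindex
 * select the interrupt, @action is the VFIO_IRQ_SET_ACTION_* to wire up,
 * and a negative @fd tears the signaling down again.
 */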
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Device state interfaces
 */

bool vfio_mig_active(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_group_list)) {
        return false;
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration_blocker) {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF)
                && (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_running_and_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
                (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}

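/*
 * Unmap a range and, in the same VFIO_IOMMU_UNMAP_DMA call, fetch the
 * dirty bitmap for it, so that writes DMA'd into the range right up to
 * the unmap are still reported to migration.
 */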
static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
    int ret;

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */

    bitmap->pgsize = qemu_real_host_page_size;
    bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                   BITS_PER_BYTE;

    if (bitmap->size > container->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
                     (uint64_t)bitmap->size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    bitmap->data = g_try_malloc0(bitmap->size);
    if (!bitmap->data) {
        ret = -ENOMEM;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
                iotlb->translated_addr, pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

    g_free(bitmap->data);
unmap_exit:
    g_free(unmap);
    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size,
                          IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (iotlb && container->dirty_pages_supported &&
        vfio_devices_all_running_and_saving(container)) {
        return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}

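/* Map [iova, iova + size) to vaddr; read-only mappings omit the WRITE flag. */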
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            g_free(hostwin);
            return 0;
        }
    }

    return -1;
}

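/* Decide whether the memory listener should ignore this section entirely. */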
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    } else if (memory_region_has_ram_discard_manager(mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
        MemoryRegionSection tmp = {
            .mr = mr,
            .offset_within_region = xlat,
            .size = int128_make64(len),
        };

        /*
         * Malicious VMs can map memory into the IOMMU, which is expected
         * to remain discarded. vfio will pin all pages, populating memory.
         * Disallow that. vmstate priorities make sure any RamDiscardManager
         * were already restored before IOMMUs are restored.
         */
        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
            error_report("iommu map to discarded memory (e.g., unplugged via"
                         " virtio-mem): %"HWADDR_PRIx"",
                         iotlb->translated_addr);
            return false;
        }

        /*
         * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
         * pages will remain pinned inside vfio until unmapped, resulting in a
         * higher memory consumption than expected. If memory would get
         * populated again later, there would be an inconsistency between pages
         * pinned by vfio and pages seen by QEMU. This is the case until
         * unmapped from the IOMMU (e.g., during device reset).
         *
         * With malicious guests, we really only care about pinning more memory
         * than expected. RLIMIT_MEMLOCK set for the user/process can never be
         * exceeded and can be used to mitigate this problem.
         */
        warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
                         " RAM (e.g., virtio-mem) works, however, malicious"
                         " guests can trigger pinning of more memory than"
                         " intended via an IOMMU. It's possible to mitigate"
                         " by setting/adjusting RLIMIT_MEMLOCK.");
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

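/*
 * RamDiscardListener callbacks: keep the container's DMA mappings in sync
 * with the populated parts of a RAM region owned by a RamDiscardManager
 * (e.g., virtio-mem), mapping on populate and unmapping on discard.
 */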
static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    int ret;

    /* Unmap with a single call. */
    ret = vfio_dma_unmap(vrdl->container, iova, size, NULL);
    if (ret) {
        error_report("%s: vfio_dma_unmap() failed: %s", __func__,
                     strerror(-ret));
    }
}

static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    const hwaddr end = section->offset_within_region +
                       int128_get64(section->size);
    hwaddr start, next, iova;
    void *vaddr;
    int ret;

    /*
     * Map in (aligned within memory region) minimum granularity, so we can
     * unmap in minimum granularity later.
     */
    for (start = section->offset_within_region; start < end; start = next) {
        next = ROUND_UP(start + 1, vrdl->granularity);
        next = MIN(next, end);

        iova = start - section->offset_within_region +
               section->offset_within_address_space;
        vaddr = memory_region_get_ram_ptr(section->mr) + start;

        ret = vfio_dma_map(vrdl->container, iova, next - start,
                           vaddr, section->readonly);
        if (ret) {
            /* Rollback */
            vfio_ram_discard_notify_discard(rdl, section);
            return ret;
        }
    }
    return 0;
}

static void vfio_register_ram_discard_listener(VFIOContainer *container,
                                               MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl;

    /* Ignore some corner cases not relevant in practice. */
    g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
                             TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));

    vrdl = g_new0(VFIORamDiscardListener, 1);
    vrdl->container = container;
    vrdl->mr = section->mr;
    vrdl->offset_within_address_space = section->offset_within_address_space;
    vrdl->size = int128_get64(section->size);
    vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
                                                                section->mr);

    g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
    g_assert(vrdl->granularity >= 1 << ctz64(container->pgsizes));

    ram_discard_listener_init(&vrdl->listener,
                              vfio_ram_discard_notify_populate,
                              vfio_ram_discard_notify_discard, true);
    ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
    QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);

    /*
     * Sanity-check if we have a theoretically problematic setup where we could
     * exceed the maximum number of possible DMA mappings over time. We assume
     * that each mapped section in the same address space as a RamDiscardManager
     * section consumes exactly one DMA mapping, with the exception of
     * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
     * in the same address space as RamDiscardManager sections.
     *
     * We assume that each section in the address space consumes one memslot.
     * We take the number of KVM memory slots as a best guess for the maximum
     * number of sections in the address space we could have over time,
     * also consuming DMA mappings.
     */
    if (container->dma_max_mappings) {
        unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;

#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            max_memslots = kvm_get_max_memslots();
        }
#endif

        QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
            hwaddr start, end;

            start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
                                    vrdl->granularity);
            end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
                           vrdl->granularity);
            vrdl_mappings += (end - start) / vrdl->granularity;
            vrdl_count++;
        }

        if (vrdl_mappings + max_memslots - vrdl_count >
            container->dma_max_mappings) {
            warn_report("%s: possibly running out of DMA mappings. E.g., try"
                        " increasing the 'block-size' of virtio-mem devices."
                        " Maximum possible DMA mappings: %d, Maximum possible"
                        " memslots: %d", __func__, container->dma_max_mappings,
                        max_memslots);
        }
    }
}

static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
                                                 MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to unregister missing RAM discard listener");
    }

    ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
    QLIST_REMOVE(vrdl, next);
    g_free(vrdl);
}

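/*
 * MemoryListener callback: DMA-map a section newly added to the address
 * space, or register the notifier/listener that will maintain its mappings.
 */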
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;
    Error *err = NULL;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask) !=
                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                error_setg(&err,
                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                    section->offset_within_address_space,
                    section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                    hostwin->min_iova, hostwin->max_iova);
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            error_setg_errno(&err, -ret, "Failed to create SPAPR window");
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_setg(&err, "Container %p can't map guest IOVA region"
                   " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_iommu_set_page_size_mask(giommu->iommu,
                                                     container->pgsizes,
                                                     &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    /*
     * For RAM memory regions with a RamDiscardManager, we only want to map the
     * actually populated parts - and update the mapping whenever we're notified
     * about changes.
     */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_register_ram_discard_listener(container, section);
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
                   container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            error_propagate_prepend(&container->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask) !=
                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_unregister_ram_discard_listener(container, section);
        /* Unregistering will trigger an unmap. */
        try_unmap = false;
    }

    if (try_unmap) {
        if (int128_eq(llsize, int128_2_64())) {
            /* The unmap ioctl doesn't accept a full 64-bit span. */
            llsize = int128_rshift(llsize, 1);
            ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
            if (ret) {
                error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                             "0x%"HWADDR_PRIx") = %d (%m)",
                             container, iova, int128_get64(llsize), ret);
            }
            iova += int128_get64(llsize);
        }
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

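/*
 * Dirty page tracking: toggle the IOMMU's dirty logging and read back dirty
 * bitmaps so migration can sync guest RAM that was written through DMA.
 */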
static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
{
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }
}

static void vfio_listener_log_global_start(MemoryListener *listener)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    vfio_set_dirty_page_tracking(container, true);
}

static void vfio_listener_log_global_stop(MemoryListener *listener)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    vfio_set_dirty_page_tracking(container, false);
}

static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                 uint64_t size, ram_addr_t ram_addr)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    uint64_t pages;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
    range->bitmap.pgsize = qemu_real_host_page_size;

    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                         BITS_PER_BYTE;
    range->bitmap.data = g_try_malloc0(range->bitmap.size);
    if (!range->bitmap.data) {
        ret = -ENOMEM;
        goto err_out;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
        goto err_out;
    }

    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
                                           ram_addr, pages);

    trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
                                range->bitmap.size, ram_addr);
err_out:
    g_free(range->bitmap.data);
    g_free(dbitmap);

    return ret;
}

typedef struct {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} vfio_giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    vfio_giommu_dirty_notifier *gdn = container_of(n,
                                                vfio_giommu_dirty_notifier, n);
    VFIOGuestIOMMU *giommu = gdn->giommu;
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    ram_addr_t translated_addr;

    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();
    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
        int ret;

        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
                                    translated_addr);
        if (ret) {
            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
    rcu_read_unlock();
}

static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
                                             void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
                                section->offset_within_region;
    VFIORamDiscardListener *vrdl = opaque;

    /*
     * Sync the whole mapped region (spanning multiple individual mappings)
     * in one go.
     */
    return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr);
}

static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container,
                                                   MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to sync missing RAM discard listener");
    }

    /*
     * We only want/can synchronize the bitmap for actually mapped parts -
     * which correspond to populated parts. Replay all populated parts.
     */
    return ram_discard_manager_replay_populated(rdm, section,
                                              vfio_ram_discard_get_dirty_bitmap,
                                                vrdl);
}

static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                  MemoryRegionSection *section)
{
    ram_addr_t ram_addr;

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                Int128 llend;
                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
                                                       MEMTXATTRS_UNSPECIFIED);

                llend = int128_add(int128_make64(section->offset_within_region),
                                   section->size);
                llend = int128_sub(llend, int128_one());

                iommu_notifier_init(&gdn.n,
                                    vfio_iommu_map_dirty_notify,
                                    IOMMU_NOTIFIER_MAP,
                                    section->offset_within_region,
                                    int128_get64(llend),
                                    idx);
                memory_region_iommu_replay(giommu->iommu, &gdn.n);
                break;
            }
        }
        return 0;
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        return vfio_sync_ram_discard_listener_dirty_bitmap(container, section);
    }

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(container,
                   REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr);
}

static void vfio_listener_log_sync(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    if (vfio_listener_skipped_section(section) ||
        !container->dirty_pages_supported) {
        return;
    }

    if (vfio_devices_all_dirty_tracking(container)) {
        vfio_sync_dirty_bitmap(container, section);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_global_start = vfio_listener_log_global_start,
    .log_global_stop = vfio_listener_log_global_stop,
    .log_sync = vfio_listener_log_sync,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

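/*
 * Capability chain walker: @cap_offset is the offset of the first
 * vfio_info_cap_header inside @ptr; each header's 'next' links the chain
 * and a zero 'next' (pointing back at @ptr) terminates it.
 */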
static struct vfio_info_cap_header *
vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
{
    struct vfio_info_cap_header *hdr;

    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

struct vfio_info_cap_header *
vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (hdr == NULL) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *) hdr;
        *avail = cap->avail;
    }

    return true;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

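/*
 * Query one device region and fill in @region from the result, including
 * its MemoryRegion and any mmap-able areas the kernel advertises.
 */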
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

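/* Teardown counterparts to vfio_region_mmap() and vfio_region_setup(). */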
void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

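/*
 * Attach/detach a group to the per-VM KVM VFIO pseudo device, creating the
 * device on first use; both are no-ops when KVM is not available.
 */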
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the container.
             * So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if (((*info)->argsz > argsz)) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
        container->dirty_pages_supported = true;
        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

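/*
 * Connect @group to a container in @as: reuse an existing container in the
 * address space if the kernel accepts the group there, otherwise open a new
 * container, select an IOMMU type and register the memory listener(s).
 */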
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it will not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        return ret;
    }

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    container->dma_max_mappings = 0;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);
    QLIST_INIT(&container->vrdl_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        ret = vfio_get_iommu_info(container, &info);

        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info->iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
        container->pgsizes = info->iova_pgsizes;

        /* The default in the kernel ("dma_entry_limit") is 65535. */
        container->dma_max_mappings = 65535;
        if (!ret) {
            vfio_get_info_dma_avail(info, &container->dma_max_mappings);
            vfio_get_iommu_info_migration(container, info);
        }
        g_free(info);
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    ram_block_discard_disable(false);
    vfio_put_address_space(space);

    return ret;
}


static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener before unsetting the container,
     * since unsetting may destroy the backend container if this is
     * the last group attached to it.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
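
/*
 * Look up or create the VFIOGroup for @groupid.  A group may only be
 * attached to one VFIO address space at a time; requesting an existing
 * group for a different AddressSpace fails.  On first use the group is
 * connected to a container and, if it is the first group overall, the
 * VFIO reset handler is registered.
 */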
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
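
/*
 * A typical call site (illustrative; the sysfs path and device name are
 * examples): the groupid is parsed from the device's iommu_group link,
 * e.g. /sys/bus/pci/devices/0000:06:0d.0/iommu_group, and the address
 * space comes from the device's IOMMU context:
 *
 *     group = vfio_get_group(groupid,
 *                            pci_device_iommu_address_space(pdev), errp);
 *     if (!group) {
 *         return -ENOENT;
 *     }
 */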

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        ram_block_discard_disable(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  The setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            ram_block_discard_disable(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}
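
/*
 * Illustrative usage (names hypothetical): for a PCI device, @name is
 * its slot address as it appears under the group in sysfs:
 *
 *     if (vfio_get_device(group, "0000:06:0d.0", &vdev->vbasedev, errp)) {
 *         vfio_put_group(group);
 *         return -ENODEV;
 *     }
 */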

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
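
/*
 * The retry loop above implements the usual VFIO "argsz" handshake:
 * when the kernel has more to report than fits in the buffer (for
 * instance when a capability chain follows the fixed struct), it
 * stores the required size in argsz and the caller reallocates and
 * repeats the ioctl with the larger buffer.
 */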

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}
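
/*
 * Example lookup (illustrative): QEMU's IGD quirk locates the Intel
 * graphics OpRegion through a device-specific region advertised with
 * a vendor type/subtype pair:
 *
 *     struct vfio_region_info *opregion = NULL;
 *     if (!vfio_get_dev_region_info(vbasedev,
 *             VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
 *             VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion)) {
 *         ... use opregion->offset / opregion->size, then g_free(opregion);
 *     }
 */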

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);

    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}
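
/*
 * A minimal usage sketch (assuming a caller such as the sPAPR PCI host
 * bridge on ppc64, which passes its IOMMU AddressSpace as @as):
 *
 *     if (vfio_eeh_as_ok(as)) {
 *         vfio_eeh_as_op(as, VFIO_EEH_PE_ENABLE);
 *     }
 *
 * where VFIO_EEH_PE_ENABLE is one of the EEH PE opcodes defined in
 * <linux/vfio.h>.
 */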