/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"
struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);
#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
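/*
 * Note: VFIO_IRQ_SET_DATA_NONE with ACTION_TRIGGER and count == 0 tells
 * the kernel to tear down every trigger for the index, which is how a
 * whole interrupt index is disabled.  The unmask/mask helpers below use
 * count == 1 with the corresponding ACTION flag instead.
 */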
void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
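/*
 * These ops back every VFIO region MemoryRegion; vfio_region_setup()
 * below wires them up with memory_region_init_io(), passing the
 * VFIORegion itself as the opaque pointer that read/write recover.
 */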
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
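/*
 * vfio_dma_map()/vfio_dma_unmap() are the only paths into the host
 * IOMMU: the MemoryListener below calls them for static RAM sections,
 * and vfio_iommu_map_notify() calls them in response to guest IOMMU
 * mapping updates.
 */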
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}
static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        goto out;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        goto out;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
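/*
 * The handler above is the runtime half of guest IOMMU support: the
 * memory core fires the registered IOMMUNotifier on each guest mapping
 * change, and we translate the IOVA the rest of the way to a host
 * virtual address before mirroring the change into the container.
 */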
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        VFIOHostDMAWindow *hostwin;
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n, false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(giommu->iommu,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, int128_get64(llsize), ret);
    }

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}
static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};
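/*
 * Each VFIOContainer registers a copy of this listener against its
 * address space (see vfio_connect_container()), so every RAM or guest
 * IOMMU section added to or deleted from that space is mirrored into
 * the host IOMMU mappings above.
 */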
static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}
static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
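/*
 * Region info capabilities form a chain: cap_offset and each header's
 * next field are byte offsets from the start of vfio_region_info, with
 * an offset of zero (hdr == ptr) terminating the walk.
 */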
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
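/*
 * A typical device backend (vfio-pci, for instance) pairs the helpers
 * in this file as: vfio_region_setup() to size and describe the region,
 * vfio_region_mmap() to establish direct mappings where allowed, and
 * vfio_region_exit()/vfio_region_finalize() on teardown, with
 * vfio_region_mmaps_set_enabled() toggling the fast path in between.
 */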
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}
void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}
void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
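/*
 * Two passes are deliberate: the first marks every device that needs a
 * reset before anything is touched, so the second pass can issue hot
 * resets spanning multiple devices without re-resetting ones already
 * handled as part of an earlier multi-device reset.
 */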
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}
static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;

listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
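/*
 * Summary of the flow above: reuse a compatible container in this
 * address space if the group fits, otherwise open a new container,
 * negotiate an IOMMU model (type1/type1v2 or sPAPR TCE), record the
 * usable IOVA window(s), and only then register the memory listener
 * that replays the address space into the container.
 */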
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(giommu->iommu, &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
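/*
 * vfio_get_group()/vfio_put_group() bracket a device's lifetime: put is
 * a no-op while any device from the group remains on device_list, so
 * the group, its container, and the reset handler are torn down only
 * when the last device goes away.
 */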
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}
void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
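/*
 * On success the caller owns *info and must g_free() it; the retry loop
 * exists because the kernel reports the argsz it actually needs when
 * capability chains make the structure larger than the base size.
 */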
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}
/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}
static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}
static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}
bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}
int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}