/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#ifdef CONFIG_KVM
#include "linux/kvm.h"
#endif
#include "trace.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
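
/*
 * Note: the three helpers above all drive the same VFIO_DEVICE_SET_IRQS
 * ioctl with VFIO_IRQ_SET_DATA_NONE; only the action flag (TRIGGER, UNMASK
 * or MASK) differs.  With a zero count the TRIGGER action tears down any
 * signaling previously configured for the given index, which is what
 * "disable" means here.
 */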

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
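
/*
 * vfio_region_ops provides the slow path for region accesses: any guest
 * access that is not covered by an mmap'd sub-region (set up in
 * vfio_region_mmap() below) is bounced through vfio_region_read() and
 * vfio_region_write(), i.e. through pread()/pwrite() on the device fd.
 */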

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
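
/*
 * Both vfio_dma_map() and vfio_dma_unmap() return 0 on success and -errno
 * on failure, so callers can either propagate the error or record it in
 * container->error during initial setup.
 */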

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

static void vfio_iommu_map_notify(Notifier *n, void *data)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    IOMMUTLBEntry *iotlb = data;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"", xlat);
        return;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
}

static hwaddr vfio_container_granularity(VFIOContainer *container)
{
    return (hwaddr)1 << ctz64(container->iova_pgsizes);
}
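
/*
 * The granularity used when replaying existing IOMMU mappings into a new
 * notifier is the smallest page size the container supports:
 * container->iova_pgsizes is a bitmap of supported sizes, so the lowest
 * set bit (1 << ctz64(...)) is the minimum IOVA page size.
 */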

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if ((iova < container->min_iova) || (end > container->max_iova)) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: We should do some checking to see if the
         * capabilities of the host VFIO IOMMU are adequate to model
         * the guest IOMMU
         *
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n,
                                   vfio_container_granularity(container),
                                   false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(&giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, int128_get64(llsize), ret);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};
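
/*
 * One copy of this listener lives in each VFIOContainer and is registered
 * against the container's AddressSpace in vfio_connect_container(), so that
 * every RAM or IOMMU region added to that address space is propagated to the
 * host IOMMU through the callbacks above.
 */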

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
}

static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
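
/*
 * Region capabilities are chained: info->cap_offset points at the first
 * vfio_info_cap_header and each header's 'next' field is an offset from the
 * start of the region info buffer.  An offset of zero (i.e. hdr == ptr)
 * terminates the walk above.
 */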

static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                           struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->nr_mmaps = sparse->nr_areas;
    region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].offset = sparse->areas[i].offset;
        region->mmaps[i].size = sparse->areas[i].size;
        trace_vfio_region_sparse_mmap_entry(i, region->mmaps[i].offset,
                                            region->mmaps[i].offset +
                                            region->mmaps[i].size);
    }
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
            !(region->size & ~qemu_real_host_page_mask)) {

            vfio_setup_region_sparse_mmaps(region, info);

            if (!region->nr_mmaps) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
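
/*
 * A region is considered mmap-capable only if the device doesn't have
 * no_mmap set, the kernel reported VFIO_REGION_INFO_FLAG_MMAP and the
 * region size is host-page aligned.  Without a sparse mmap capability a
 * single mmap covering the whole region is assumed.
 */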

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_ptr(&region->mmaps[i].mem,
                                   memory_region_owner(region->mem),
                                   name, region->mmaps[i].size,
                                   region->mmaps[i].mmap);
        g_free(name);
        memory_region_set_skip_dump(&region->mmaps[i].mem);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}
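
/*
 * On mmap failure the loop above unwinds every sub-region it already
 * created and returns the -errno from the failing mmap; with no mmap'd
 * sub-regions present, accesses simply fall through to the slow path
 * provided by vfio_region_ops.
 */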

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
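
/*
 * The reset handler runs in two passes: first every device computes whether
 * it needs a reset, then hot resets are issued.  Splitting the passes lets a
 * single multi-device hot reset satisfy several affected devices without
 * resetting them more than once.
 */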

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;

    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU,
                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        container->min_iova = 0;
        container->max_iova = (hwaddr)-1;

        /* Assume just 4K IOVA page size */
        container->iova_pgsizes = 0x1000;
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            container->iova_pgsizes = info.iova_pgsizes;
        }
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_report("vfio: failed to enable container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * This only considers the host IOMMU's 32-bit window.  At
         * some point we need to add support for the optional 64-bit
         * window and dynamic windows
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
            ret = -errno;
            goto free_container_exit;
        }
        container->min_iova = info.dma32_window_start;
        container->max_iova = container->min_iova + info.dma32_window_size - 1;

        /* Assume just 4K IOVA pages for now */
        container->iova_pgsizes = 0x1000;
    } else {
        error_report("vfio: No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_report("vfio: memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;

listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
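
/*
 * Error handling in vfio_connect_container() unwinds in reverse order of
 * setup: unregister the listener, free the container, close the container
 * fd and finally drop the address space reference taken at the top.
 */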

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(&giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_report("vfio: group %d used in multiple address spaces",
                             group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-<bus> "
                     "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
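
/*
 * vfio_get_region_info() allocates *info and retries with a larger buffer
 * when the kernel reports, via argsz, that capability data did not fit.
 * The caller owns the returned buffer and must g_free() it.
 */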

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5), the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}