/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "trace.h"
struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);
#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
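
/*
 * Illustrative only: a bus-specific backend such as vfio-pci would tear
 * down a whole interrupt index with something like
 *
 *     vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 *
 * (vdev being that backend's device state; hypothetical name here).
 * With DATA_NONE/ACTION_TRIGGER, a count of 0 asks the kernel to
 * disable every interrupt within the index.
 */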
void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
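
/*
 * Worked example of the endianness rule above: a 4-byte write of
 * 0x12345678 goes through cpu_to_le32(), so pwrite() pushes the bytes
 * 78 56 34 12 regardless of host byte order; the device always sees
 * little endian, and vfio_region_read() undoes the swap symmetrically.
 */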
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
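
/*
 * These ops back the MemoryRegion created in vfio_region_setup() below
 * via memory_region_init_io(region->mem, obj, &vfio_region_ops, region,
 * name, region->size), so any guest access not satisfied by an mmap'd
 * subregion funnels through the slow-path wrappers above.
 */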
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
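
/*
 * Sketch of a typical call, assuming a hypothetical page of guest RAM
 * already resident at host virtual address vaddr:
 *
 *     ret = vfio_dma_map(container, 0x100000, 0x1000, vaddr, false);
 *
 * maps guest IOVA 0x100000..0x100fff read/write for the device, while
 * readonly = true would drop VFIO_DMA_MAP_FLAG_WRITE for ROM-like areas.
 */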
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
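
/*
 * Illustration of the high-bit filter above: while a guest sizes an
 * enabled 64-bit BAR, the region can transiently land at an address
 * such as 0xfffffffffe000000 with bit 63 set, so we skip it instead
 * of asking the host IOMMU to map something it may not support.
 */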
static void vfio_iommu_map_notify(Notifier *n, void *data)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    IOMMUTLBEntry *iotlb = data;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->iova,
                                iotlb->iova + iotlb->addr_mask);

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        goto out;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        goto out;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iotlb->iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
static hwaddr vfio_container_granularity(VFIOContainer *container)
{
    return (hwaddr)1 << ctz64(container->iova_pgsizes);
}
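
/*
 * Example: a host IOMMU advertising iova_pgsizes = 0x11000 (4K and 64K
 * page support) yields ctz64(0x11000) = 12, so IOMMU replay proceeds at
 * a 1 << 12 = 4K granularity, the smallest supported IOVA page size.
 */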
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if ((iova < container->min_iova) || (end > container->max_iova)) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: We should do some checking to see if the
         * capabilities of the host VFIO IOMMU are adequate to model
         * the guest IOMMU
         *
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n,
                                   vfio_container_granularity(container),
                                   false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(&giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    trace_vfio_listener_region_del(iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}
static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};
static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
}
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
            !(region->size & ~qemu_real_host_page_mask)) {

            region->nr_mmaps = 1;
            region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);

            region->mmaps[0].offset = 0;
            region->mmaps[0].size = region->size;
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_ptr(&region->mmaps[i].mem,
                                   memory_region_owner(region->mem),
                                   name, region->mmaps[i].size,
                                   region->mmaps[i].mmap);
        g_free(name);
        memory_region_set_skip_dump(&region->mmaps[i].mem);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}
void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}
void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}
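
/*
 * Expected VFIORegion lifecycle, for reference (obj is the QOM owner a
 * caller would pass in; error handling omitted):
 *
 *     vfio_region_setup(obj, vbasedev, region, nr, name);
 *     vfio_region_mmap(region);                   // optional fast path
 *     vfio_region_mmaps_set_enabled(region, ...); // toggle at runtime
 *     vfio_region_exit(region);                   // detach subregions
 *     vfio_region_finalize(region);               // unmap and free
 */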
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}
static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU,
                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        container->min_iova = 0;
        container->max_iova = (hwaddr)-1;

        /* Assume just 4K IOVA page size */
        container->iova_pgsizes = 0x1000;
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            container->iova_pgsizes = info.iova_pgsizes;
        }
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }
        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_report("vfio: failed to enable container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * This only considers the host IOMMU's 32-bit window.  At
         * some point we need to add support for the optional 64-bit
         * window and dynamic windows
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
            ret = -errno;
            goto free_container_exit;
        }
        container->min_iova = info.dma32_window_start;
        container->max_iova = container->min_iova + info.dma32_window_size - 1;

        /* Assume just 4K IOVA pages for now */
        container->iova_pgsizes = 0x1000;
    } else {
        error_report("vfio: No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_report("vfio: memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(&giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_report("vfio: group %d used in multiple address spaces",
                             group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-<bus> "
                     "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}
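
/*
 * Typical open sequence for a backend, sketched with hypothetical
 * values (the group number and device name come from sysfs in practice):
 *
 *     group = vfio_get_group(groupid, &address_space_memory);
 *     if (group && !vfio_get_device(group, "0000:06:0d.0", vbasedev)) {
 *         ... use vbasedev->fd, its regions and interrupts ...
 *     }
 *
 * vfio_put_base_device() and vfio_put_group() undo the two steps.
 */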
void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        return -errno;
    }

    return 0;
}
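
/*
 * Note for callers: on success *info was allocated here with g_malloc0()
 * and must be released with g_free(), as vfio_region_setup() does above.
 */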
/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}
static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return 0;
}
static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}
bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}
int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}
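
/*
 * Example: spapr EEH code might enable error handling on a device's DMA
 * address space with
 *
 *     ret = vfio_eeh_as_op(as, VFIO_EEH_PE_ENABLE);
 *
 * getting -ENODEV if no container exists in that address space and
 * -EPERM when the single-group requirement checked above is not met.
 */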