/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}

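/*
 * Host DMA windows track the IOVA ranges (and supported page sizes) that
 * the host IOMMU backing this container can map.  Windows must not
 * overlap; the memory listener checks each guest mapping against this
 * list before calling vfio_dma_map().
 */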
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapping host DMA windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

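/*
 * Guest IOMMU notifier: translate the IOTLB entry to a host virtual
 * address and mirror the map or unmap into the VFIO container.
 */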
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock().  But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel.  This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

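/*
 * MemoryListener callback: mirror a newly added MemoryRegionSection into
 * the container.  RAM sections are mapped directly with vfio_dma_map();
 * IOMMU sections instead register a notifier so that guest IOMMU updates
 * are replayed into the container as they happen.
 */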
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n, false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(giommu->iommu,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, int128_get64(llsize), ret);
    }

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

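/*
 * Parse the sparse mmap capability, if present, into the region's array
 * of mmap-able areas.  Zero-sized areas reported by the kernel are
 * skipped.
 */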
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

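/*
 * mmap() each of the region's mmap-able areas and insert them as
 * subregions of region->mem.  On failure, unwind any areas already
 * mapped and return -errno from the failed mmap().
 */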
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

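/*
 * System reset: two passes over all known devices, first letting each
 * compute whether it needs a reset, then performing the resets,
 * presumably so a single multi-device hot reset can cover several
 * devices on the same bus.
 */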
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

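/*
 * Register the group fd with the KVM VFIO pseudo device, creating the
 * device on first use.  Failures are reported but not fatal.
 */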
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

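/*
 * Attach a group to a container in the given address space.  An existing
 * container in the space is reused if the kernel lets the group attach
 * to it; otherwise a new container is created and an IOMMU type (Type1
 * or sPAPR TCE) is negotiated with the kernel.
 */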
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            /*
             * There is a default window in the just-created container.
             * To make region_add/del simpler, we'd better remove this
             * window now and let the iommu_listener callbacks
             * create/remove windows when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(giommu->iommu, &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

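/*
 * Look up (or open and connect) the VFIO group for @groupid and bind it
 * to the given address space.  A group may only be used in one address
 * space at a time.
 */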
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

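/*
 * Fetch a device fd from the group and fill in the basic VFIODevice
 * fields from VFIO_DEVICE_GET_INFO.
 */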
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

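/*
 * Retrieve region info for @index, growing the buffer and retrying if
 * the kernel reports (via argsz) that capability data needs more space.
 */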
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}