hw/vfio/common.c
1 /*
2 * generic functions used by VFIO devices
4 * Copyright Red Hat, Inc. 2012
6 * Authors:
7 * Alex Williamson <alex.williamson@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Based on qemu-kvm device-assignment:
13 * Adapted for KVM by Qumranet.
14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
21 #include "qemu/osdep.h"
22 #include <sys/ioctl.h>
23 #ifdef CONFIG_KVM
24 #include <linux/kvm.h>
25 #endif
26 #include <linux/vfio.h>
28 #include "hw/vfio/vfio-common.h"
29 #include "hw/vfio/vfio.h"
30 #include "exec/address-spaces.h"
31 #include "exec/memory.h"
32 #include "exec/ram_addr.h"
33 #include "hw/hw.h"
34 #include "qemu/error-report.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/range.h"
37 #include "sysemu/kvm.h"
38 #include "sysemu/reset.h"
39 #include "trace.h"
40 #include "qapi/error.h"
41 #include "migration/migration.h"
43 VFIOGroupList vfio_group_list =
44 QLIST_HEAD_INITIALIZER(vfio_group_list);
45 static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
46 QLIST_HEAD_INITIALIZER(vfio_address_spaces);
48 #ifdef CONFIG_KVM
50 * We have a single VFIO pseudo device per KVM VM. Once created it lives
51 * for the life of the VM. Closing the file descriptor only drops our
52 * reference to it and the device's reference to kvm. Therefore once
53 * initialized, this file descriptor is only released on QEMU exit and
54 * we'll re-use it should another vfio device be attached before then.
56 static int vfio_kvm_device_fd = -1;
57 #endif
60 * Common VFIO interrupt disable
62 void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
64 struct vfio_irq_set irq_set = {
65 .argsz = sizeof(irq_set),
66 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
67 .index = index,
68 .start = 0,
69 .count = 0,
72 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
75 void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
77 struct vfio_irq_set irq_set = {
78 .argsz = sizeof(irq_set),
79 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
80 .index = index,
81 .start = 0,
82 .count = 1,
85 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
88 void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
90 struct vfio_irq_set irq_set = {
91 .argsz = sizeof(irq_set),
92 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
93 .index = index,
94 .start = 0,
95 .count = 1,
98 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
101 static inline const char *action_to_str(int action)
103 switch (action) {
104 case VFIO_IRQ_SET_ACTION_MASK:
105 return "MASK";
106 case VFIO_IRQ_SET_ACTION_UNMASK:
107 return "UNMASK";
108 case VFIO_IRQ_SET_ACTION_TRIGGER:
109 return "TRIGGER";
110 default:
111 return "UNKNOWN ACTION";
115 static const char *index_to_str(VFIODevice *vbasedev, int index)
117 if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
118 return NULL;
121 switch (index) {
122 case VFIO_PCI_INTX_IRQ_INDEX:
123 return "INTX";
124 case VFIO_PCI_MSI_IRQ_INDEX:
125 return "MSI";
126 case VFIO_PCI_MSIX_IRQ_INDEX:
127 return "MSIX";
128 case VFIO_PCI_ERR_IRQ_INDEX:
129 return "ERR";
130 case VFIO_PCI_REQ_IRQ_INDEX:
131 return "REQ";
132 default:
133 return NULL;
137 int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
138 int action, int fd, Error **errp)
140 struct vfio_irq_set *irq_set;
141 int argsz, ret = 0;
142 const char *name;
143 int32_t *pfd;
145 argsz = sizeof(*irq_set) + sizeof(*pfd);
147 irq_set = g_malloc0(argsz);
148 irq_set->argsz = argsz;
149 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
150 irq_set->index = index;
151 irq_set->start = subindex;
152 irq_set->count = 1;
153 pfd = (int32_t *)&irq_set->data;
154 *pfd = fd;
156 if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
157 ret = -errno;
159 g_free(irq_set);
161 if (!ret) {
162 return 0;
165 error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");
167 name = index_to_str(vbasedev, index);
168 if (name) {
169 error_prepend(errp, "%s-%d: ", name, subindex);
170 } else {
171 error_prepend(errp, "index %d-%d: ", index, subindex);
173 error_prepend(errp,
174 "Failed to %s %s eventfd signaling for interrupt ",
175 fd < 0 ? "tear down" : "set up", action_to_str(action));
176 return ret;
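/*
 * Usage sketch (hypothetical vfio-pci style caller; vdev and req_notifier are
 * illustrative and not defined in this file): wire a single eventfd to one
 * IRQ subindex, and tear it down again by passing fd = -1 with the same
 * action.
 *
 *     Error *err = NULL;
 *     int fd = event_notifier_get_fd(&vdev->req_notifier);
 *
 *     if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
 *                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
 *         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
 *     }
 */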
180  * IO Port/MMIO - beware of endianness: VFIO is always little endian
182 void vfio_region_write(void *opaque, hwaddr addr,
183 uint64_t data, unsigned size)
185 VFIORegion *region = opaque;
186 VFIODevice *vbasedev = region->vbasedev;
187 union {
188 uint8_t byte;
189 uint16_t word;
190 uint32_t dword;
191 uint64_t qword;
192 } buf;
194 switch (size) {
195 case 1:
196 buf.byte = data;
197 break;
198 case 2:
199 buf.word = cpu_to_le16(data);
200 break;
201 case 4:
202 buf.dword = cpu_to_le32(data);
203 break;
204 case 8:
205 buf.qword = cpu_to_le64(data);
206 break;
207 default:
208 hw_error("vfio: unsupported write size, %u bytes", size);
209 break;
212 if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
213 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
214 ",%d) failed: %m",
215 __func__, vbasedev->name, region->nr,
216 addr, data, size);
219 trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
222 * A read or write to a BAR always signals an INTx EOI. This will
223 * do nothing if not pending (including not in INTx mode). We assume
224 * that a BAR access is in response to an interrupt and that BAR
225 * accesses will service the interrupt. Unfortunately, we don't know
226 * which access will service the interrupt, so we're potentially
227 * getting quite a few host interrupts per guest interrupt.
229 vbasedev->ops->vfio_eoi(vbasedev);
232 uint64_t vfio_region_read(void *opaque,
233 hwaddr addr, unsigned size)
235 VFIORegion *region = opaque;
236 VFIODevice *vbasedev = region->vbasedev;
237 union {
238 uint8_t byte;
239 uint16_t word;
240 uint32_t dword;
241 uint64_t qword;
242 } buf;
243 uint64_t data = 0;
245 if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
246 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
247 __func__, vbasedev->name, region->nr,
248 addr, size);
249 return (uint64_t)-1;
251 switch (size) {
252 case 1:
253 data = buf.byte;
254 break;
255 case 2:
256 data = le16_to_cpu(buf.word);
257 break;
258 case 4:
259 data = le32_to_cpu(buf.dword);
260 break;
261 case 8:
262 data = le64_to_cpu(buf.qword);
263 break;
264 default:
265 hw_error("vfio: unsupported read size, %u bytes", size);
266 break;
269 trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
271 /* Same as write above */
272 vbasedev->ops->vfio_eoi(vbasedev);
274 return data;
277 const MemoryRegionOps vfio_region_ops = {
278 .read = vfio_region_read,
279 .write = vfio_region_write,
280 .endianness = DEVICE_LITTLE_ENDIAN,
281 .valid = {
282 .min_access_size = 1,
283 .max_access_size = 8,
285 .impl = {
286 .min_access_size = 1,
287 .max_access_size = 8,
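/*
 * These ops back the MemoryRegion created in vfio_region_setup() below via
 * memory_region_init_io(), so guest accesses to region offsets that are not
 * covered by an mmap end up as pread()/pwrite() on the device fd.
 */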
292 * Device state interfaces
295 bool vfio_mig_active(void)
297 VFIOGroup *group;
298 VFIODevice *vbasedev;
300 if (QLIST_EMPTY(&vfio_group_list)) {
301 return false;
304 QLIST_FOREACH(group, &vfio_group_list, next) {
305 QLIST_FOREACH(vbasedev, &group->device_list, next) {
306 if (vbasedev->migration_blocker) {
307 return false;
311 return true;
314 static bool vfio_devices_all_stopped_and_saving(VFIOContainer *container)
316 VFIOGroup *group;
317 VFIODevice *vbasedev;
318 MigrationState *ms = migrate_get_current();
320 if (!migration_is_setup_or_active(ms->state)) {
321 return false;
324 QLIST_FOREACH(group, &container->group_list, container_next) {
325 QLIST_FOREACH(vbasedev, &group->device_list, next) {
326 VFIOMigration *migration = vbasedev->migration;
328 if (!migration) {
329 return false;
332 if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
333 !(migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
334 continue;
335 } else {
336 return false;
340 return true;
343 static bool vfio_devices_all_running_and_saving(VFIOContainer *container)
345 VFIOGroup *group;
346 VFIODevice *vbasedev;
347 MigrationState *ms = migrate_get_current();
349 if (!migration_is_setup_or_active(ms->state)) {
350 return false;
353 QLIST_FOREACH(group, &container->group_list, container_next) {
354 QLIST_FOREACH(vbasedev, &group->device_list, next) {
355 VFIOMigration *migration = vbasedev->migration;
357 if (!migration) {
358 return false;
361 if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
362 (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
363 continue;
364 } else {
365 return false;
369 return true;
372 static int vfio_dma_unmap_bitmap(VFIOContainer *container,
373 hwaddr iova, ram_addr_t size,
374 IOMMUTLBEntry *iotlb)
376 struct vfio_iommu_type1_dma_unmap *unmap;
377 struct vfio_bitmap *bitmap;
378 uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS;
379 int ret;
381 unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
383 unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
384 unmap->iova = iova;
385 unmap->size = size;
386 unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
387 bitmap = (struct vfio_bitmap *)&unmap->data;
390      * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to be in
391      * units of TARGET_PAGE_SIZE pages when marking pages dirty, so set
392      * bitmap_pgsize to TARGET_PAGE_SIZE.
395 bitmap->pgsize = TARGET_PAGE_SIZE;
396 bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
397 BITS_PER_BYTE;
399 if (bitmap->size > container->max_dirty_bitmap_size) {
400 error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
401 (uint64_t)bitmap->size);
402 ret = -E2BIG;
403 goto unmap_exit;
406 bitmap->data = g_try_malloc0(bitmap->size);
407 if (!bitmap->data) {
408 ret = -ENOMEM;
409 goto unmap_exit;
412 ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
413 if (!ret) {
414 cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
415 iotlb->translated_addr, pages);
416 } else {
417 error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
420 g_free(bitmap->data);
421 unmap_exit:
422 g_free(unmap);
423 return ret;
427 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
429 static int vfio_dma_unmap(VFIOContainer *container,
430 hwaddr iova, ram_addr_t size,
431 IOMMUTLBEntry *iotlb)
433 struct vfio_iommu_type1_dma_unmap unmap = {
434 .argsz = sizeof(unmap),
435 .flags = 0,
436 .iova = iova,
437 .size = size,
440 if (iotlb && container->dirty_pages_supported &&
441 vfio_devices_all_running_and_saving(container)) {
442 return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
445 while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
447 * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
448 * v4.15) where an overflow in its wrap-around check prevents us from
449 * unmapping the last page of the address space. Test for the error
450 * condition and re-try the unmap excluding the last page. The
451 * expectation is that we've never mapped the last page anyway and this
452 * unmap request comes via vIOMMU support which also makes it unlikely
453 * that this page is used. This bug was introduced well after type1 v2
454 * support was introduced, so we shouldn't need to test for v1. A fix
455 * is queued for kernel v5.0 so this workaround can be removed once
456 * affected kernels are sufficiently deprecated.
458 if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
459 container->iommu_type == VFIO_TYPE1v2_IOMMU) {
460 trace_vfio_dma_unmap_overflow_workaround();
461 unmap.size -= 1ULL << ctz64(container->pgsizes);
462 continue;
464 error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
465 return -errno;
468 return 0;
471 static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
472 ram_addr_t size, void *vaddr, bool readonly)
474 struct vfio_iommu_type1_dma_map map = {
475 .argsz = sizeof(map),
476 .flags = VFIO_DMA_MAP_FLAG_READ,
477 .vaddr = (__u64)(uintptr_t)vaddr,
478 .iova = iova,
479 .size = size,
482 if (!readonly) {
483 map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
487 * Try the mapping, if it fails with EBUSY, unmap the region and try
488 * again. This shouldn't be necessary, but we sometimes see it in
489 * the VGA ROM space.
491 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
492 (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
493 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
494 return 0;
497 error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
498 return -errno;
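/*
 * Illustrative call (host_ptr and the addresses are made up): map a 2 MiB
 * chunk of guest RAM read/write at IOVA 0x100000000, backed by the host
 * virtual address host_ptr:
 *
 *     vfio_dma_map(container, 0x100000000ULL, 2 * MiB, host_ptr, false);
 *
 * The IOVA range [iova, iova + size) stays mapped until a matching
 * vfio_dma_unmap().
 */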
501 static void vfio_host_win_add(VFIOContainer *container,
502 hwaddr min_iova, hwaddr max_iova,
503 uint64_t iova_pgsizes)
505 VFIOHostDMAWindow *hostwin;
507 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
508 if (ranges_overlap(hostwin->min_iova,
509 hostwin->max_iova - hostwin->min_iova + 1,
510 min_iova,
511 max_iova - min_iova + 1)) {
512 hw_error("%s: Overlapped IOMMU are not enabled", __func__);
516 hostwin = g_malloc0(sizeof(*hostwin));
518 hostwin->min_iova = min_iova;
519 hostwin->max_iova = max_iova;
520 hostwin->iova_pgsizes = iova_pgsizes;
521 QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
524 static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
525 hwaddr max_iova)
527 VFIOHostDMAWindow *hostwin;
529 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
530 if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
531 QLIST_REMOVE(hostwin, hostwin_next);
532 return 0;
536 return -1;
539 static bool vfio_listener_skipped_section(MemoryRegionSection *section)
541 return (!memory_region_is_ram(section->mr) &&
542 !memory_region_is_iommu(section->mr)) ||
544 * Sizing an enabled 64-bit BAR can cause spurious mappings to
545 * addresses in the upper part of the 64-bit address space. These
546             * are never accessed by the CPU and are beyond the address width of
547 * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
549 section->offset_within_address_space & (1ULL << 63);
552 /* Called with rcu_read_lock held. */
553 static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
554 ram_addr_t *ram_addr, bool *read_only)
556 MemoryRegion *mr;
557 hwaddr xlat;
558 hwaddr len = iotlb->addr_mask + 1;
559 bool writable = iotlb->perm & IOMMU_WO;
562 * The IOMMU TLB entry we have just covers translation through
563 * this IOMMU to its immediate target. We need to translate
564 * it the rest of the way through to memory.
566 mr = address_space_translate(&address_space_memory,
567 iotlb->translated_addr,
568 &xlat, &len, writable,
569 MEMTXATTRS_UNSPECIFIED);
570 if (!memory_region_is_ram(mr)) {
571 error_report("iommu map to non memory area %"HWADDR_PRIx"",
572 xlat);
573 return false;
577 * Translation truncates length to the IOMMU page size,
578 * check that it did not truncate too much.
580 if (len & iotlb->addr_mask) {
581 error_report("iommu has granularity incompatible with target AS");
582 return false;
585 if (vaddr) {
586 *vaddr = memory_region_get_ram_ptr(mr) + xlat;
589 if (ram_addr) {
590 *ram_addr = memory_region_get_ram_addr(mr) + xlat;
593 if (read_only) {
594 *read_only = !writable || mr->readonly;
597 return true;
600 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
602 VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
603 VFIOContainer *container = giommu->container;
604 hwaddr iova = iotlb->iova + giommu->iommu_offset;
605 void *vaddr;
606 int ret;
608 trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
609 iova, iova + iotlb->addr_mask);
611 if (iotlb->target_as != &address_space_memory) {
612 error_report("Wrong target AS \"%s\", only system memory is allowed",
613 iotlb->target_as->name ? iotlb->target_as->name : "none");
614 return;
617 rcu_read_lock();
619 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
620 bool read_only;
622 if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
623 goto out;
626 * vaddr is only valid until rcu_read_unlock(). But after
627 * vfio_dma_map has set up the mapping the pages will be
628 * pinned by the kernel. This makes sure that the RAM backend
629 * of vaddr will always be there, even if the memory object is
630 * destroyed and its backing memory munmap-ed.
632 ret = vfio_dma_map(container, iova,
633 iotlb->addr_mask + 1, vaddr,
634 read_only);
635 if (ret) {
636 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
637 "0x%"HWADDR_PRIx", %p) = %d (%m)",
638 container, iova,
639 iotlb->addr_mask + 1, vaddr, ret);
641 } else {
642 ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
643 if (ret) {
644 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
645 "0x%"HWADDR_PRIx") = %d (%m)",
646 container, iova,
647 iotlb->addr_mask + 1, ret);
650 out:
651 rcu_read_unlock();
654 static void vfio_listener_region_add(MemoryListener *listener,
655 MemoryRegionSection *section)
657 VFIOContainer *container = container_of(listener, VFIOContainer, listener);
658 hwaddr iova, end;
659 Int128 llend, llsize;
660 void *vaddr;
661 int ret;
662 VFIOHostDMAWindow *hostwin;
663 bool hostwin_found;
664 Error *err = NULL;
666 if (vfio_listener_skipped_section(section)) {
667 trace_vfio_listener_region_add_skip(
668 section->offset_within_address_space,
669 section->offset_within_address_space +
670 int128_get64(int128_sub(section->size, int128_one())));
671 return;
674 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
675 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
676 error_report("%s received unaligned region", __func__);
677 return;
680 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
681 llend = int128_make64(section->offset_within_address_space);
682 llend = int128_add(llend, section->size);
683 llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
685 if (int128_ge(int128_make64(iova), llend)) {
686 return;
688 end = int128_get64(int128_sub(llend, int128_one()));
690 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
691 hwaddr pgsize = 0;
693 /* For now intersections are not allowed, we may relax this later */
694 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
695 if (ranges_overlap(hostwin->min_iova,
696 hostwin->max_iova - hostwin->min_iova + 1,
697 section->offset_within_address_space,
698 int128_get64(section->size))) {
699 error_setg(&err,
700 "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing"
701 "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
702 section->offset_within_address_space,
703 section->offset_within_address_space +
704 int128_get64(section->size) - 1,
705 hostwin->min_iova, hostwin->max_iova);
706 goto fail;
710 ret = vfio_spapr_create_window(container, section, &pgsize);
711 if (ret) {
712 error_setg_errno(&err, -ret, "Failed to create SPAPR window");
713 goto fail;
716 vfio_host_win_add(container, section->offset_within_address_space,
717 section->offset_within_address_space +
718 int128_get64(section->size) - 1, pgsize);
719 #ifdef CONFIG_KVM
720 if (kvm_enabled()) {
721 VFIOGroup *group;
722 IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
723 struct kvm_vfio_spapr_tce param;
724 struct kvm_device_attr attr = {
725 .group = KVM_DEV_VFIO_GROUP,
726 .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
727 .addr = (uint64_t)(unsigned long)&param,
730 if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
731 &param.tablefd)) {
732 QLIST_FOREACH(group, &container->group_list, container_next) {
733 param.groupfd = group->fd;
734 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
735 error_report("vfio: failed to setup fd %d "
736 "for a group with fd %d: %s",
737 param.tablefd, param.groupfd,
738 strerror(errno));
739 return;
741 trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
745 #endif
748 hostwin_found = false;
749 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
750 if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
751 hostwin_found = true;
752 break;
756 if (!hostwin_found) {
757 error_setg(&err, "Container %p can't map guest IOVA region"
758 " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
759 goto fail;
762 memory_region_ref(section->mr);
764 if (memory_region_is_iommu(section->mr)) {
765 VFIOGuestIOMMU *giommu;
766 IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
767 int iommu_idx;
769 trace_vfio_listener_region_add_iommu(iova, end);
771 * FIXME: For VFIO iommu types which have KVM acceleration to
772 * avoid bouncing all map/unmaps through qemu this way, this
773 * would be the right place to wire that up (tell the KVM
774 * device emulation the VFIO iommu handles to use).
776 giommu = g_malloc0(sizeof(*giommu));
777 giommu->iommu = iommu_mr;
778 giommu->iommu_offset = section->offset_within_address_space -
779 section->offset_within_region;
780 giommu->container = container;
781 llend = int128_add(int128_make64(section->offset_within_region),
782 section->size);
783 llend = int128_sub(llend, int128_one());
784 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
785 MEMTXATTRS_UNSPECIFIED);
786 iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
787 IOMMU_NOTIFIER_ALL,
788 section->offset_within_region,
789 int128_get64(llend),
790 iommu_idx);
792 ret = memory_region_iommu_set_page_size_mask(giommu->iommu,
793 container->pgsizes,
794 &err);
795 if (ret) {
796 g_free(giommu);
797 goto fail;
800 ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
801 &err);
802 if (ret) {
803 g_free(giommu);
804 goto fail;
806 QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
807 memory_region_iommu_replay(giommu->iommu, &giommu->n);
809 return;
812 /* Here we assume that memory_region_is_ram(section->mr)==true */
814 vaddr = memory_region_get_ram_ptr(section->mr) +
815 section->offset_within_region +
816 (iova - section->offset_within_address_space);
818 trace_vfio_listener_region_add_ram(iova, end, vaddr);
820 llsize = int128_sub(llend, int128_make64(iova));
822 if (memory_region_is_ram_device(section->mr)) {
823 hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
825 if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
826 trace_vfio_listener_region_add_no_dma_map(
827 memory_region_name(section->mr),
828 section->offset_within_address_space,
829 int128_getlo(section->size),
830 pgmask + 1);
831 return;
835 ret = vfio_dma_map(container, iova, int128_get64(llsize),
836 vaddr, section->readonly);
837 if (ret) {
838 error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
839 "0x%"HWADDR_PRIx", %p) = %d (%m)",
840 container, iova, int128_get64(llsize), vaddr, ret);
841 if (memory_region_is_ram_device(section->mr)) {
842 /* Allow unexpected mappings not to be fatal for RAM devices */
843 error_report_err(err);
844 return;
846 goto fail;
849 return;
851 fail:
852 if (memory_region_is_ram_device(section->mr)) {
853 error_report("failed to vfio_dma_map. pci p2p may not work");
854 return;
857      * On the initfn path, store the first error in the container so we
858      * can fail gracefully. At runtime, there's not much we can do other
859      * than throw a hardware error.
861 if (!container->initialized) {
862 if (!container->error) {
863 error_propagate_prepend(&container->error, err,
864 "Region %s: ",
865 memory_region_name(section->mr));
866 } else {
867 error_free(err);
869 } else {
870 error_report_err(err);
871 hw_error("vfio: DMA mapping failed, unable to continue");
875 static void vfio_listener_region_del(MemoryListener *listener,
876 MemoryRegionSection *section)
878 VFIOContainer *container = container_of(listener, VFIOContainer, listener);
879 hwaddr iova, end;
880 Int128 llend, llsize;
881 int ret;
882 bool try_unmap = true;
884 if (vfio_listener_skipped_section(section)) {
885 trace_vfio_listener_region_del_skip(
886 section->offset_within_address_space,
887 section->offset_within_address_space +
888 int128_get64(int128_sub(section->size, int128_one())));
889 return;
892 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
893 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
894 error_report("%s received unaligned region", __func__);
895 return;
898 if (memory_region_is_iommu(section->mr)) {
899 VFIOGuestIOMMU *giommu;
901 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
902 if (MEMORY_REGION(giommu->iommu) == section->mr &&
903 giommu->n.start == section->offset_within_region) {
904 memory_region_unregister_iommu_notifier(section->mr,
905 &giommu->n);
906 QLIST_REMOVE(giommu, giommu_next);
907 g_free(giommu);
908 break;
913 * FIXME: We assume the one big unmap below is adequate to
914 * remove any individual page mappings in the IOMMU which
915 * might have been copied into VFIO. This works for a page table
916 * based IOMMU where a big unmap flattens a large range of IO-PTEs.
917 * That may not be true for all IOMMU types.
921 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
922 llend = int128_make64(section->offset_within_address_space);
923 llend = int128_add(llend, section->size);
924 llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
926 if (int128_ge(int128_make64(iova), llend)) {
927 return;
929 end = int128_get64(int128_sub(llend, int128_one()));
931 llsize = int128_sub(llend, int128_make64(iova));
933 trace_vfio_listener_region_del(iova, end);
935 if (memory_region_is_ram_device(section->mr)) {
936 hwaddr pgmask;
937 VFIOHostDMAWindow *hostwin;
938 bool hostwin_found = false;
940 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
941 if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
942 hostwin_found = true;
943 break;
946 assert(hostwin_found); /* or region_add() would have failed */
948 pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
949 try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
952 if (try_unmap) {
953 if (int128_eq(llsize, int128_2_64())) {
954 /* The unmap ioctl doesn't accept a full 64-bit span. */
955 llsize = int128_rshift(llsize, 1);
956 ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
957 if (ret) {
958 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
959 "0x%"HWADDR_PRIx") = %d (%m)",
960 container, iova, int128_get64(llsize), ret);
962 iova += int128_get64(llsize);
964 ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
965 if (ret) {
966 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
967 "0x%"HWADDR_PRIx") = %d (%m)",
968 container, iova, int128_get64(llsize), ret);
972 memory_region_unref(section->mr);
974 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
975 vfio_spapr_remove_window(container,
976 section->offset_within_address_space);
977 if (vfio_host_win_del(container,
978 section->offset_within_address_space,
979 section->offset_within_address_space +
980 int128_get64(section->size) - 1) < 0) {
981 hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
982 __func__, section->offset_within_address_space);
987 static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
988 uint64_t size, ram_addr_t ram_addr)
990 struct vfio_iommu_type1_dirty_bitmap *dbitmap;
991 struct vfio_iommu_type1_dirty_bitmap_get *range;
992 uint64_t pages;
993 int ret;
995 dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
997 dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
998 dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
999 range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
1000 range->iova = iova;
1001 range->size = size;
1004      * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to be in
1005      * units of TARGET_PAGE_SIZE pages when marking pages dirty, so set the
1006      * bitmap's pgsize to TARGET_PAGE_SIZE.
1008 range->bitmap.pgsize = TARGET_PAGE_SIZE;
1010 pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS;
1011 range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
1012 BITS_PER_BYTE;
1013 range->bitmap.data = g_try_malloc0(range->bitmap.size);
1014 if (!range->bitmap.data) {
1015 ret = -ENOMEM;
1016 goto err_out;
1019 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
1020 if (ret) {
1021 error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
1022 " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
1023 (uint64_t)range->size, errno);
1024 goto err_out;
1027 cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
1028 ram_addr, pages);
1030 trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
1031 range->bitmap.size, ram_addr);
1032 err_out:
1033 g_free(range->bitmap.data);
1034 g_free(dbitmap);
1036 return ret;
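/*
 * Worked example of the sizing above, assuming TARGET_PAGE_SIZE is 4 KiB:
 * for a 1 GiB range, pages = 0x40000000 >> 12 = 262144, and bitmap.size =
 * ROUND_UP(262144, 64) / 8 = 32768 bytes, i.e. one bit per page rounded up
 * to whole 64-bit words.
 */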
1039 typedef struct {
1040 IOMMUNotifier n;
1041 VFIOGuestIOMMU *giommu;
1042 } vfio_giommu_dirty_notifier;
1044 static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
1046 vfio_giommu_dirty_notifier *gdn = container_of(n,
1047 vfio_giommu_dirty_notifier, n);
1048 VFIOGuestIOMMU *giommu = gdn->giommu;
1049 VFIOContainer *container = giommu->container;
1050 hwaddr iova = iotlb->iova + giommu->iommu_offset;
1051 ram_addr_t translated_addr;
1053 trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
1055 if (iotlb->target_as != &address_space_memory) {
1056 error_report("Wrong target AS \"%s\", only system memory is allowed",
1057 iotlb->target_as->name ? iotlb->target_as->name : "none");
1058 return;
1061 rcu_read_lock();
1062 if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
1063 int ret;
1065 ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
1066 translated_addr);
1067 if (ret) {
1068 error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
1069 "0x%"HWADDR_PRIx") = %d (%m)",
1070 container, iova,
1071 iotlb->addr_mask + 1, ret);
1074 rcu_read_unlock();
1077 static int vfio_sync_dirty_bitmap(VFIOContainer *container,
1078 MemoryRegionSection *section)
1080 ram_addr_t ram_addr;
1082 if (memory_region_is_iommu(section->mr)) {
1083 VFIOGuestIOMMU *giommu;
1085 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
1086 if (MEMORY_REGION(giommu->iommu) == section->mr &&
1087 giommu->n.start == section->offset_within_region) {
1088 Int128 llend;
1089 vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
1090 int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
1091 MEMTXATTRS_UNSPECIFIED);
1093 llend = int128_add(int128_make64(section->offset_within_region),
1094 section->size);
1095 llend = int128_sub(llend, int128_one());
1097 iommu_notifier_init(&gdn.n,
1098 vfio_iommu_map_dirty_notify,
1099 IOMMU_NOTIFIER_MAP,
1100 section->offset_within_region,
1101 int128_get64(llend),
1102 idx);
1103 memory_region_iommu_replay(giommu->iommu, &gdn.n);
1104 break;
1107 return 0;
1110 ram_addr = memory_region_get_ram_addr(section->mr) +
1111 section->offset_within_region;
1113 return vfio_get_dirty_bitmap(container,
1114 TARGET_PAGE_ALIGN(section->offset_within_address_space),
1115 int128_get64(section->size), ram_addr);
1118 static void vfio_listener_log_sync(MemoryListener *listener,
1119 MemoryRegionSection *section)
1121 VFIOContainer *container = container_of(listener, VFIOContainer, listener);
1123 if (vfio_listener_skipped_section(section) ||
1124 !container->dirty_pages_supported) {
1125 return;
1128 if (vfio_devices_all_stopped_and_saving(container)) {
1129 vfio_sync_dirty_bitmap(container, section);
1133 static const MemoryListener vfio_memory_listener = {
1134 .region_add = vfio_listener_region_add,
1135 .region_del = vfio_listener_region_del,
1136     .log_sync = vfio_listener_log_sync,
1139 static void vfio_listener_release(VFIOContainer *container)
1141 memory_listener_unregister(&container->listener);
1142 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
1143 memory_listener_unregister(&container->prereg_listener);
1147 static struct vfio_info_cap_header *
1148 vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
1150 struct vfio_info_cap_header *hdr;
1152 for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
1153 if (hdr->id == id) {
1154 return hdr;
1158 return NULL;
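/*
 * Note on the walk above: cap_offset and each header's 'next' field are byte
 * offsets from the start of the info buffer, with 0 terminating the chain,
 * which is why the loop stops once hdr wraps back around to ptr.
 */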
1161 struct vfio_info_cap_header *
1162 vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
1164 if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
1165 return NULL;
1168 return vfio_get_cap((void *)info, info->cap_offset, id);
1171 static struct vfio_info_cap_header *
1172 vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
1174 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
1175 return NULL;
1178 return vfio_get_cap((void *)info, info->cap_offset, id);
1181 struct vfio_info_cap_header *
1182 vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
1184 if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
1185 return NULL;
1188 return vfio_get_cap((void *)info, info->cap_offset, id);
1191 bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
1192 unsigned int *avail)
1194 struct vfio_info_cap_header *hdr;
1195 struct vfio_iommu_type1_info_dma_avail *cap;
1197 /* If the capability cannot be found, assume no DMA limiting */
1198 hdr = vfio_get_iommu_type1_info_cap(info,
1199 VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
1200 if (hdr == NULL) {
1201 return false;
1204 if (avail != NULL) {
1205 cap = (void *) hdr;
1206 *avail = cap->avail;
1209 return true;
1212 static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
1213 struct vfio_region_info *info)
1215 struct vfio_info_cap_header *hdr;
1216 struct vfio_region_info_cap_sparse_mmap *sparse;
1217 int i, j;
1219 hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
1220 if (!hdr) {
1221 return -ENODEV;
1224 sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
1226 trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
1227 region->nr, sparse->nr_areas);
1229 region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
1231 for (i = 0, j = 0; i < sparse->nr_areas; i++) {
1232 trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
1233 sparse->areas[i].offset +
1234 sparse->areas[i].size);
1236 if (sparse->areas[i].size) {
1237 region->mmaps[j].offset = sparse->areas[i].offset;
1238 region->mmaps[j].size = sparse->areas[i].size;
1239 j++;
1243 region->nr_mmaps = j;
1244 region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
1246 return 0;
1249 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
1250 int index, const char *name)
1252 struct vfio_region_info *info;
1253 int ret;
1255 ret = vfio_get_region_info(vbasedev, index, &info);
1256 if (ret) {
1257 return ret;
1260 region->vbasedev = vbasedev;
1261 region->flags = info->flags;
1262 region->size = info->size;
1263 region->fd_offset = info->offset;
1264 region->nr = index;
1266 if (region->size) {
1267 region->mem = g_new0(MemoryRegion, 1);
1268 memory_region_init_io(region->mem, obj, &vfio_region_ops,
1269 region, name, region->size);
1271 if (!vbasedev->no_mmap &&
1272 region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
1274 ret = vfio_setup_region_sparse_mmaps(region, info);
1276 if (ret) {
1277 region->nr_mmaps = 1;
1278 region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
1279 region->mmaps[0].offset = 0;
1280 region->mmaps[0].size = region->size;
1285 g_free(info);
1287 trace_vfio_region_setup(vbasedev->name, index, name,
1288 region->flags, region->fd_offset, region->size);
1289 return 0;
1292 static void vfio_subregion_unmap(VFIORegion *region, int index)
1294 trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
1295 region->mmaps[index].offset,
1296 region->mmaps[index].offset +
1297 region->mmaps[index].size - 1);
1298 memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
1299 munmap(region->mmaps[index].mmap, region->mmaps[index].size);
1300 object_unparent(OBJECT(&region->mmaps[index].mem));
1301 region->mmaps[index].mmap = NULL;
1304 int vfio_region_mmap(VFIORegion *region)
1306 int i, prot = 0;
1307 char *name;
1309 if (!region->mem) {
1310 return 0;
1313 prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
1314 prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
1316 for (i = 0; i < region->nr_mmaps; i++) {
1317 region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
1318 MAP_SHARED, region->vbasedev->fd,
1319 region->fd_offset +
1320 region->mmaps[i].offset);
1321 if (region->mmaps[i].mmap == MAP_FAILED) {
1322 int ret = -errno;
1324 trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
1325 region->fd_offset +
1326 region->mmaps[i].offset,
1327 region->fd_offset +
1328 region->mmaps[i].offset +
1329 region->mmaps[i].size - 1, ret);
1331 region->mmaps[i].mmap = NULL;
1333 for (i--; i >= 0; i--) {
1334 vfio_subregion_unmap(region, i);
1337 return ret;
1340 name = g_strdup_printf("%s mmaps[%d]",
1341 memory_region_name(region->mem), i);
1342 memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
1343 memory_region_owner(region->mem),
1344 name, region->mmaps[i].size,
1345 region->mmaps[i].mmap);
1346 g_free(name);
1347 memory_region_add_subregion(region->mem, region->mmaps[i].offset,
1348 &region->mmaps[i].mem);
1350 trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
1351 region->mmaps[i].offset,
1352 region->mmaps[i].offset +
1353 region->mmaps[i].size - 1);
1356 return 0;
1359 void vfio_region_unmap(VFIORegion *region)
1361 int i;
1363 if (!region->mem) {
1364 return;
1367 for (i = 0; i < region->nr_mmaps; i++) {
1368 if (region->mmaps[i].mmap) {
1369 vfio_subregion_unmap(region, i);
1374 void vfio_region_exit(VFIORegion *region)
1376 int i;
1378 if (!region->mem) {
1379 return;
1382 for (i = 0; i < region->nr_mmaps; i++) {
1383 if (region->mmaps[i].mmap) {
1384 memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
1388 trace_vfio_region_exit(region->vbasedev->name, region->nr);
1391 void vfio_region_finalize(VFIORegion *region)
1393 int i;
1395 if (!region->mem) {
1396 return;
1399 for (i = 0; i < region->nr_mmaps; i++) {
1400 if (region->mmaps[i].mmap) {
1401 munmap(region->mmaps[i].mmap, region->mmaps[i].size);
1402 object_unparent(OBJECT(&region->mmaps[i].mem));
1406 object_unparent(OBJECT(region->mem));
1408 g_free(region->mem);
1409 g_free(region->mmaps);
1411 trace_vfio_region_finalize(region->vbasedev->name, region->nr);
1413 region->mem = NULL;
1414 region->mmaps = NULL;
1415 region->nr_mmaps = 0;
1416 region->size = 0;
1417 region->flags = 0;
1418 region->nr = 0;
1421 void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
1423 int i;
1425 if (!region->mem) {
1426 return;
1429 for (i = 0; i < region->nr_mmaps; i++) {
1430 if (region->mmaps[i].mmap) {
1431 memory_region_set_enabled(&region->mmaps[i].mem, enabled);
1435 trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
1436 enabled);
1439 void vfio_reset_handler(void *opaque)
1441 VFIOGroup *group;
1442 VFIODevice *vbasedev;
1444 QLIST_FOREACH(group, &vfio_group_list, next) {
1445 QLIST_FOREACH(vbasedev, &group->device_list, next) {
1446 if (vbasedev->dev->realized) {
1447 vbasedev->ops->vfio_compute_needs_reset(vbasedev);
1452 QLIST_FOREACH(group, &vfio_group_list, next) {
1453 QLIST_FOREACH(vbasedev, &group->device_list, next) {
1454 if (vbasedev->dev->realized && vbasedev->needs_reset) {
1455 vbasedev->ops->vfio_hot_reset_multi(vbasedev);
1461 static void vfio_kvm_device_add_group(VFIOGroup *group)
1463 #ifdef CONFIG_KVM
1464 struct kvm_device_attr attr = {
1465 .group = KVM_DEV_VFIO_GROUP,
1466 .attr = KVM_DEV_VFIO_GROUP_ADD,
1467 .addr = (uint64_t)(unsigned long)&group->fd,
1470 if (!kvm_enabled()) {
1471 return;
1474 if (vfio_kvm_device_fd < 0) {
1475 struct kvm_create_device cd = {
1476 .type = KVM_DEV_TYPE_VFIO,
1479 if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
1480 error_report("Failed to create KVM VFIO device: %m");
1481 return;
1484 vfio_kvm_device_fd = cd.fd;
1487 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
1488 error_report("Failed to add group %d to KVM VFIO device: %m",
1489 group->groupid);
1491 #endif
1494 static void vfio_kvm_device_del_group(VFIOGroup *group)
1496 #ifdef CONFIG_KVM
1497 struct kvm_device_attr attr = {
1498 .group = KVM_DEV_VFIO_GROUP,
1499 .attr = KVM_DEV_VFIO_GROUP_DEL,
1500 .addr = (uint64_t)(unsigned long)&group->fd,
1503 if (vfio_kvm_device_fd < 0) {
1504 return;
1507 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
1508 error_report("Failed to remove group %d from KVM VFIO device: %m",
1509 group->groupid);
1511 #endif
1514 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
1516 VFIOAddressSpace *space;
1518 QLIST_FOREACH(space, &vfio_address_spaces, list) {
1519 if (space->as == as) {
1520 return space;
1524 /* No suitable VFIOAddressSpace, create a new one */
1525 space = g_malloc0(sizeof(*space));
1526 space->as = as;
1527 QLIST_INIT(&space->containers);
1529 QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
1531 return space;
1534 static void vfio_put_address_space(VFIOAddressSpace *space)
1536 if (QLIST_EMPTY(&space->containers)) {
1537 QLIST_REMOVE(space, list);
1538 g_free(space);
1543 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
1545 static int vfio_get_iommu_type(VFIOContainer *container,
1546 Error **errp)
1548 int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
1549 VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
1550 int i;
1552 for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
1553 if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
1554 return iommu_types[i];
1557 error_setg(errp, "No available IOMMU models");
1558 return -EINVAL;
1561 static int vfio_init_container(VFIOContainer *container, int group_fd,
1562 Error **errp)
1564 int iommu_type, ret;
1566 iommu_type = vfio_get_iommu_type(container, errp);
1567 if (iommu_type < 0) {
1568 return iommu_type;
1571 ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
1572 if (ret) {
1573 error_setg_errno(errp, errno, "Failed to set group container");
1574 return -errno;
1577 while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
1578 if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
1580              * On sPAPR, the IOMMU subdriver always advertises both v1 and v2,
1581              * but the running platform may not actually support v2, and there
1582              * is no way to tell until an IOMMU group gets added to the container.
1583              * So if setting v2 fails, try v1 as a fallback.
1585 iommu_type = VFIO_SPAPR_TCE_IOMMU;
1586 continue;
1588 error_setg_errno(errp, errno, "Failed to set iommu for container");
1589 return -errno;
1592 container->iommu_type = iommu_type;
1593 return 0;
1596 static int vfio_get_iommu_info(VFIOContainer *container,
1597 struct vfio_iommu_type1_info **info)
1600 size_t argsz = sizeof(struct vfio_iommu_type1_info);
1602 *info = g_new0(struct vfio_iommu_type1_info, 1);
1603 again:
1604 (*info)->argsz = argsz;
1606 if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
1607 g_free(*info);
1608 *info = NULL;
1609 return -errno;
1612 if (((*info)->argsz > argsz)) {
1613 argsz = (*info)->argsz;
1614 *info = g_realloc(*info, argsz);
1615 goto again;
1618 return 0;
1621 static struct vfio_info_cap_header *
1622 vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
1624 struct vfio_info_cap_header *hdr;
1625 void *ptr = info;
1627 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
1628 return NULL;
1631 for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
1632 if (hdr->id == id) {
1633 return hdr;
1637 return NULL;
1640 static void vfio_get_iommu_info_migration(VFIOContainer *container,
1641 struct vfio_iommu_type1_info *info)
1643 struct vfio_info_cap_header *hdr;
1644 struct vfio_iommu_type1_info_cap_migration *cap_mig;
1646 hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
1647 if (!hdr) {
1648 return;
1651 cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
1652 header);
1655      * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to be in
1656      * units of TARGET_PAGE_SIZE pages when marking pages dirty.
1658 if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) {
1659 container->dirty_pages_supported = true;
1660 container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
1661 container->dirty_pgsizes = cap_mig->pgsize_bitmap;
1665 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
1666 Error **errp)
1668 VFIOContainer *container;
1669 int ret, fd;
1670 VFIOAddressSpace *space;
1672 space = vfio_get_address_space(as);
1675 * VFIO is currently incompatible with discarding of RAM insofar as the
1676 * madvise to purge (zap) the page from QEMU's address space does not
1677 * interact with the memory API and therefore leaves stale virtual to
1678 * physical mappings in the IOMMU if the page was previously pinned. We
1679 * therefore set discarding broken for each group added to a container,
1680 * whether the container is used individually or shared. This provides
1681 * us with options to allow devices within a group to opt-in and allow
1682 * discarding, so long as it is done consistently for a group (for instance
1683 * if the device is an mdev device where it is known that the host vendor
1684 * driver will never pin pages outside of the working set of the guest
1685 * driver, which would thus not be discarding candidates).
1687 * The first opportunity to induce pinning occurs here where we attempt to
1688 * attach the group to existing containers within the AddressSpace. If any
1689 * pages are already zapped from the virtual address space, such as from
1690 * previous discards, new pinning will cause valid mappings to be
1691 * re-established. Likewise, when the overall MemoryListener for a new
1692 * container is registered, a replay of mappings within the AddressSpace
1693 * will occur, re-establishing any previously zapped pages as well.
1695      * In particular, virtio-balloon is currently only prevented from discarding
1696      * new memory; it does not yet set ram_block_discard_set_required() and
1697      * therefore neither stops us here nor deals with the sudden memory
1698      * consumption of inflated memory.
1700 ret = ram_block_discard_disable(true);
1701 if (ret) {
1702 error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
1703 return ret;
1706 QLIST_FOREACH(container, &space->containers, next) {
1707 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
1708 group->container = container;
1709 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
1710 vfio_kvm_device_add_group(group);
1711 return 0;
1715 fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
1716 if (fd < 0) {
1717 error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
1718 ret = -errno;
1719 goto put_space_exit;
1722 ret = ioctl(fd, VFIO_GET_API_VERSION);
1723 if (ret != VFIO_API_VERSION) {
1724 error_setg(errp, "supported vfio version: %d, "
1725 "reported version: %d", VFIO_API_VERSION, ret);
1726 ret = -EINVAL;
1727 goto close_fd_exit;
1730 container = g_malloc0(sizeof(*container));
1731 container->space = space;
1732 container->fd = fd;
1733 container->error = NULL;
1734 container->dirty_pages_supported = false;
1735 QLIST_INIT(&container->giommu_list);
1736 QLIST_INIT(&container->hostwin_list);
1738 ret = vfio_init_container(container, group->fd, errp);
1739 if (ret) {
1740 goto free_container_exit;
1743 switch (container->iommu_type) {
1744 case VFIO_TYPE1v2_IOMMU:
1745 case VFIO_TYPE1_IOMMU:
1747 struct vfio_iommu_type1_info *info;
1750 * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
1751 * IOVA whatsoever. That's not actually true, but the current
1752 * kernel interface doesn't tell us what it can map, and the
1753 * existing Type1 IOMMUs generally support any IOVA we're
1754 * going to actually try in practice.
1756 ret = vfio_get_iommu_info(container, &info);
1758 if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
1759 /* Assume 4k IOVA page size */
1760 info->iova_pgsizes = 4096;
1762 vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
1763 container->pgsizes = info->iova_pgsizes;
1765 if (!ret) {
1766 vfio_get_iommu_info_migration(container, info);
1768 g_free(info);
1769 break;
1771 case VFIO_SPAPR_TCE_v2_IOMMU:
1772 case VFIO_SPAPR_TCE_IOMMU:
1774 struct vfio_iommu_spapr_tce_info info;
1775 bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
1778 * The host kernel code implementing VFIO_IOMMU_DISABLE is called
1779 * when container fd is closed so we do not call it explicitly
1780 * in this file.
1782 if (!v2) {
1783 ret = ioctl(fd, VFIO_IOMMU_ENABLE);
1784 if (ret) {
1785 error_setg_errno(errp, errno, "failed to enable container");
1786 ret = -errno;
1787 goto free_container_exit;
1789 } else {
1790 container->prereg_listener = vfio_prereg_listener;
1792 memory_listener_register(&container->prereg_listener,
1793 &address_space_memory);
1794 if (container->error) {
1795 memory_listener_unregister(&container->prereg_listener);
1796 ret = -1;
1797 error_propagate_prepend(errp, container->error,
1798 "RAM memory listener initialization failed: ");
1799 goto free_container_exit;
1803 info.argsz = sizeof(info);
1804 ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1805 if (ret) {
1806 error_setg_errno(errp, errno,
1807 "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
1808 ret = -errno;
1809 if (v2) {
1810 memory_listener_unregister(&container->prereg_listener);
1812 goto free_container_exit;
1815 if (v2) {
1816 container->pgsizes = info.ddw.pgsizes;
1818              * A just-created container comes with a default window.
1819              * To keep region_add/del simple, we remove this window now
1820              * and let the iommu_listener callbacks create/remove windows
1821              * as needed.
1823 ret = vfio_spapr_remove_window(container, info.dma32_window_start);
1824 if (ret) {
1825 error_setg_errno(errp, -ret,
1826 "failed to remove existing window");
1827 goto free_container_exit;
1829 } else {
1830 /* The default table uses 4K pages */
1831 container->pgsizes = 0x1000;
1832 vfio_host_win_add(container, info.dma32_window_start,
1833 info.dma32_window_start +
1834 info.dma32_window_size - 1,
1835 0x1000);
1840 vfio_kvm_device_add_group(group);
1842 QLIST_INIT(&container->group_list);
1843 QLIST_INSERT_HEAD(&space->containers, container, next);
1845 group->container = container;
1846 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
1848 container->listener = vfio_memory_listener;
1850 memory_listener_register(&container->listener, container->space->as);
1852 if (container->error) {
1853 ret = -1;
1854 error_propagate_prepend(errp, container->error,
1855 "memory listener initialization failed: ");
1856 goto listener_release_exit;
1859 container->initialized = true;
1861 return 0;
1862 listener_release_exit:
1863 QLIST_REMOVE(group, container_next);
1864 QLIST_REMOVE(container, next);
1865 vfio_kvm_device_del_group(group);
1866 vfio_listener_release(container);
1868 free_container_exit:
1869 g_free(container);
1871 close_fd_exit:
1872 close(fd);
1874 put_space_exit:
1875 ram_block_discard_disable(false);
1876 vfio_put_address_space(space);
1878 return ret;
1881 static void vfio_disconnect_container(VFIOGroup *group)
1883 VFIOContainer *container = group->container;
1885 QLIST_REMOVE(group, container_next);
1886 group->container = NULL;
1889 * Explicitly release the listener first before unset container,
1890 * since unset may destroy the backend container if it's the last
1891 * group.
1893 if (QLIST_EMPTY(&container->group_list)) {
1894 vfio_listener_release(container);
1897 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
1898 error_report("vfio: error disconnecting group %d from container",
1899 group->groupid);
1902 if (QLIST_EMPTY(&container->group_list)) {
1903 VFIOAddressSpace *space = container->space;
1904 VFIOGuestIOMMU *giommu, *tmp;
1906 QLIST_REMOVE(container, next);
1908 QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
1909 memory_region_unregister_iommu_notifier(
1910 MEMORY_REGION(giommu->iommu), &giommu->n);
1911 QLIST_REMOVE(giommu, giommu_next);
1912 g_free(giommu);
1915 trace_vfio_disconnect_container(container->fd);
1916 close(container->fd);
1917 g_free(container);
1919 vfio_put_address_space(space);
1923 VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
1925 VFIOGroup *group;
1926 char path[32];
1927 struct vfio_group_status status = { .argsz = sizeof(status) };
1929 QLIST_FOREACH(group, &vfio_group_list, next) {
1930 if (group->groupid == groupid) {
1931 /* Found it. Now is it already in the right context? */
1932 if (group->container->space->as == as) {
1933 return group;
1934 } else {
1935 error_setg(errp, "group %d used in multiple address spaces",
1936 group->groupid);
1937 return NULL;
1942 group = g_malloc0(sizeof(*group));
1944 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
1945 group->fd = qemu_open_old(path, O_RDWR);
1946 if (group->fd < 0) {
1947 error_setg_errno(errp, errno, "failed to open %s", path);
1948 goto free_group_exit;
1951 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
1952 error_setg_errno(errp, errno, "failed to get group %d status", groupid);
1953 goto close_fd_exit;
1956 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
1957 error_setg(errp, "group %d is not viable", groupid);
1958 error_append_hint(errp,
1959 "Please ensure all devices within the iommu_group "
1960 "are bound to their vfio bus driver.\n");
1961 goto close_fd_exit;
1964 group->groupid = groupid;
1965 QLIST_INIT(&group->device_list);
1967 if (vfio_connect_container(group, as, errp)) {
1968 error_prepend(errp, "failed to setup container for group %d: ",
1969 groupid);
1970 goto close_fd_exit;
1973 if (QLIST_EMPTY(&vfio_group_list)) {
1974 qemu_register_reset(vfio_reset_handler, NULL);
1977 QLIST_INSERT_HEAD(&vfio_group_list, group, next);
1979 return group;
1981 close_fd_exit:
1982 close(group->fd);
1984 free_group_exit:
1985 g_free(group);
1987 return NULL;
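/*
 * Rough lifecycle as used by device frontends such as vfio-pci (sketch, not
 * part of this file): vfio_get_group() attaches the group to a container for
 * the device's AddressSpace, vfio_get_device() then opens the device fd, and
 * vfio_put_base_device()/vfio_put_group() undo both on unrealize.
 */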
1990 void vfio_put_group(VFIOGroup *group)
1992 if (!group || !QLIST_EMPTY(&group->device_list)) {
1993 return;
1996 if (!group->ram_block_discard_allowed) {
1997 ram_block_discard_disable(false);
1999 vfio_kvm_device_del_group(group);
2000 vfio_disconnect_container(group);
2001 QLIST_REMOVE(group, next);
2002 trace_vfio_put_group(group->fd);
2003 close(group->fd);
2004 g_free(group);
2006 if (QLIST_EMPTY(&vfio_group_list)) {
2007 qemu_unregister_reset(vfio_reset_handler, NULL);
2011 int vfio_get_device(VFIOGroup *group, const char *name,
2012 VFIODevice *vbasedev, Error **errp)
2014 struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
2015 int ret, fd;
2017 fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
2018 if (fd < 0) {
2019 error_setg_errno(errp, errno, "error getting device from group %d",
2020 group->groupid);
2021 error_append_hint(errp,
2022 "Verify all devices in group %d are bound to vfio-<bus> "
2023 "or pci-stub and not already in use\n", group->groupid);
2024 return fd;
2027 ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
2028 if (ret) {
2029 error_setg_errno(errp, errno, "error getting device info");
2030 close(fd);
2031 return ret;
2035 * Set discarding of RAM as not broken for this group if the driver knows
2036 * the device operates compatibly with discarding. Setting must be
2037 * consistent per group, but since compatibility is really only possible
2038 * with mdev currently, we expect singleton groups.
2040 if (vbasedev->ram_block_discard_allowed !=
2041 group->ram_block_discard_allowed) {
2042 if (!QLIST_EMPTY(&group->device_list)) {
2043 error_setg(errp, "Inconsistent setting of support for discarding "
2044 "RAM (e.g., balloon) within group");
2045 close(fd);
2046 return -1;
2049 if (!group->ram_block_discard_allowed) {
2050 group->ram_block_discard_allowed = true;
2051 ram_block_discard_disable(false);
2055 vbasedev->fd = fd;
2056 vbasedev->group = group;
2057 QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
2059 vbasedev->num_irqs = dev_info.num_irqs;
2060 vbasedev->num_regions = dev_info.num_regions;
2061 vbasedev->flags = dev_info.flags;
2063 trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
2064 dev_info.num_irqs);
2066 vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
2067 return 0;
2070 void vfio_put_base_device(VFIODevice *vbasedev)
2072 if (!vbasedev->group) {
2073 return;
2075 QLIST_REMOVE(vbasedev, next);
2076 vbasedev->group = NULL;
2077 trace_vfio_put_base_device(vbasedev->fd);
2078 close(vbasedev->fd);
2081 int vfio_get_region_info(VFIODevice *vbasedev, int index,
2082 struct vfio_region_info **info)
2084 size_t argsz = sizeof(struct vfio_region_info);
2086 *info = g_malloc0(argsz);
2088 (*info)->index = index;
2089 retry:
2090 (*info)->argsz = argsz;
2092 if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
2093 g_free(*info);
2094 *info = NULL;
2095 return -errno;
2098 if ((*info)->argsz > argsz) {
2099 argsz = (*info)->argsz;
2100 *info = g_realloc(*info, argsz);
2102 goto retry;
2105 return 0;
2108 int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
2109 uint32_t subtype, struct vfio_region_info **info)
2111 int i;
2113 for (i = 0; i < vbasedev->num_regions; i++) {
2114 struct vfio_info_cap_header *hdr;
2115 struct vfio_region_info_cap_type *cap_type;
2117 if (vfio_get_region_info(vbasedev, i, info)) {
2118 continue;
2121 hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
2122 if (!hdr) {
2123 g_free(*info);
2124 continue;
2127 cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
2129 trace_vfio_get_dev_region(vbasedev->name, i,
2130 cap_type->type, cap_type->subtype);
2132 if (cap_type->type == type && cap_type->subtype == subtype) {
2133 return 0;
2136 g_free(*info);
2139 *info = NULL;
2140 return -ENODEV;
2143 bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
2145 struct vfio_region_info *info = NULL;
2146 bool ret = false;
2148 if (!vfio_get_region_info(vbasedev, region, &info)) {
2149 if (vfio_get_region_info_cap(info, cap_type)) {
2150 ret = true;
2152 g_free(info);
2155 return ret;
2159 * Interfaces for IBM EEH (Enhanced Error Handling)
2161 static bool vfio_eeh_container_ok(VFIOContainer *container)
2164 * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
2165 * implementation is broken if there are multiple groups in a
2166 * container. The hardware works in units of Partitionable
2167 * Endpoints (== IOMMU groups) and the EEH operations naively
2168 * iterate across all groups in the container, without any logic
2169 * to make sure the groups have their state synchronized. For
2170 * certain operations (ENABLE) that might be ok, until an error
2171 * occurs, but for others (GET_STATE) it's clearly broken.
2175 * XXX Once fixed kernels exist, test for them here
2178 if (QLIST_EMPTY(&container->group_list)) {
2179 return false;
2182 if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
2183 return false;
2186 return true;
2189 static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
2191 struct vfio_eeh_pe_op pe_op = {
2192 .argsz = sizeof(pe_op),
2193 .op = op,
2195 int ret;
2197 if (!vfio_eeh_container_ok(container)) {
2198 error_report("vfio/eeh: EEH_PE_OP 0x%x: "
2199 "kernel requires a container with exactly one group", op);
2200 return -EPERM;
2203 ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
2204 if (ret < 0) {
2205 error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
2206 return -errno;
2209 return ret;
2212 static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
2214 VFIOAddressSpace *space = vfio_get_address_space(as);
2215 VFIOContainer *container = NULL;
2217 if (QLIST_EMPTY(&space->containers)) {
2218 /* No containers to act on */
2219 goto out;
2222 container = QLIST_FIRST(&space->containers);
2224 if (QLIST_NEXT(container, next)) {
2225 /* We don't yet have logic to synchronize EEH state across
2226 * multiple containers */
2227 container = NULL;
2228 goto out;
2231 out:
2232 vfio_put_address_space(space);
2233 return container;
2236 bool vfio_eeh_as_ok(AddressSpace *as)
2238 VFIOContainer *container = vfio_eeh_as_container(as);
2240 return (container != NULL) && vfio_eeh_container_ok(container);
2243 int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
2245 VFIOContainer *container = vfio_eeh_as_container(as);
2247 if (!container) {
2248 return -ENODEV;
2250 return vfio_eeh_container_op(container, op);