qemu/ar7.git: hw/vfio/common.c (commit "hw/ppc: Drop useless CONFIG_KVM ifdefery")
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/balloon.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}
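/*
 * Editor's note: set up (fd >= 0) or tear down (fd < 0) eventfd signaling
 * for one interrupt.  @index/@subindex select the interrupt and @action is
 * one of the VFIO_IRQ_SET_ACTION_* values.  Returns 0 on success or a
 * negative errno value, in which case @errp carries a prefixed message.
 */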
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}
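/*
 * Editor's note: accesses of 1, 2, 4 and 8 bytes are passed through to the
 * region file descriptor; the device end is always little endian, so any
 * byte swapping happens in the read/write handlers above.
 */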
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}
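/*
 * Editor's note: map the host virtual range [@vaddr, @vaddr + @size) at
 * guest IOVA @iova.  The kernel pins the backing pages for the lifetime of
 * the mapping, which is what makes the vIOMMU notifier below safe after
 * rcu_read_unlock().
 */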
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock().  But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel.  This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
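/*
 * Editor's note: MemoryListener callback for a new section in the
 * container's address space.  RAM sections get a static VFIO DMA mapping,
 * while vIOMMU sections register an IOMMU notifier so that guest mappings
 * are mirrored into the host IOMMU as they change.
 */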
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(section->mr, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}
struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
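/*
 * Editor's note: if the kernel advertises the sparse mmap capability for a
 * region, record each non-empty mmap-able area; callers fall back to
 * mmapping the whole region when no such capability is present.
 */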
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
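/*
 * Editor's note: mmap each mmap-able area of the region into QEMU and
 * expose it as a RAM-device subregion; on any failure, previously
 * established mappings for this region are torn down again.
 */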
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}
void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
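/*
 * Editor's note: register @group with the KVM VFIO pseudo device (created
 * on first use) so that KVM knows which VFIO groups are attached to the
 * VM; the sPAPR TCE acceleration above relies on this device.
 */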
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1
             * and v2, the running platform may not support v2 and there is
             * no way to guess this until an IOMMU group gets added to the
             * container.  So if it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}
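/*
 * Editor's note: attach @group to a container in @as.  An existing
 * container in the address space is reused if the kernel accepts the
 * group; otherwise a new /dev/vfio/vfio container is opened, an IOMMU
 * type selected, and the memory listeners that mirror the address space
 * into the IOMMU are registered.
 */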
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with memory ballooning insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore add a balloon inhibit for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * ballooning, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be ballooning candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from a
     * previous ballooning opt-in, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * NB. Balloon inhibiting does not currently block operation of the
     * balloon driver or revoke previously pinned pages, it only prevents
     * calling madvise to modify the virtual mapping of ballooned pages.
     */
    qemu_balloon_inhibit(true);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
        container->pgsizes = info.iova_pgsizes;
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in the just-created container.
             * To keep region_add/del simple, remove this window now and
             * let the iommu_listener callbacks create/remove windows as
             * needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    qemu_balloon_inhibit(false);
    vfio_put_address_space(space);

    return ret;
}
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener before unsetting the container,
     * since unsetting may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
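/*
 * Editor's note: look up or create the VFIOGroup for /dev/vfio/<groupid>.
 * A group can only be attached to one address space, so reusing an
 * existing group in a different AddressSpace is rejected.
 */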
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->balloon_allowed) {
        qemu_balloon_inhibit(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Clear the balloon inhibitor for this group if the driver knows the
     * device operates compatibly with ballooning.  Setting must be consistent
     * per group, but since compatibility is really only possible with mdev
     * currently, we expect singleton groups.
     */
    if (vbasedev->balloon_allowed != group->balloon_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp,
                       "Inconsistent device balloon setting within group");
            close(fd);
            return -1;
        }

        if (!group->balloon_allowed) {
            group->balloon_allowed = true;
            qemu_balloon_inhibit(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
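/*
 * Editor's note: region info uses the usual VFIO "argsz" protocol.  When
 * the kernel needs a larger buffer, e.g. to return a capability chain, it
 * reports the required size back in argsz and we retry with a reallocated
 * buffer.
 */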
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}
bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}