qemu/ar7.git: hw/vfio/common.c

/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

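/*
 * Wire an eventfd to one interrupt (index/subindex) of a device, or tear
 * the wiring down again when fd is -1.  The caller picks the action
 * (mask/unmask/trigger); on failure an error naming the interrupt and
 * action is returned through errp.  A sketch of a typical (hypothetical)
 * caller, e.g. routing an MSI-X vector's notifier:
 *
 *     vfio_set_irq_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
 *                            VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
 */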
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

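/*
 * These ops back the MemoryRegion created in vfio_region_setup() for any
 * part of a region that is not (or cannot be) mmap'd; every access is
 * bounced through pread()/pwrite() on the device fd, in little-endian
 * byte order per the VFIO region contract above.
 */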
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

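/*
 * A VFIOHostDMAWindow tracks one contiguous range of IOVA space that the
 * host IOMMU can actually map, along with the IOVA page sizes it supports.
 * Type1 containers register a single 0..~0 window (see
 * vfio_connect_container() below); sPAPR containers add and remove windows
 * as guest DMA windows come and go.
 */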
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: overlapping host DMA windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx,
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock().  But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel.  This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

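/*
 * MemoryListener callbacks: every RAM MemoryRegionSection that appears in
 * the container's address space gets a static DMA mapping (guest physical
 * address used directly as IOVA), while vIOMMU regions instead get an
 * IOMMUNotifier so guest mappings are relayed to the host through
 * vfio_iommu_map_notify() above.
 */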
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;
    Error *err = NULL;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                error_setg(&err,
                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                    section->offset_within_address_space,
                    section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                    hostwin->min_iova, hostwin->max_iova);
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            error_setg_errno(&err, -ret, "Failed to create SPAPR window");
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_setg(&err, "Container %p can't map guest IOVA region"
                   " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
                   container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("vfio_dma_map failed; PCI peer-to-peer (p2p) DMA "
                     "may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            error_propagate_prepend(&container->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

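/*
 * Region info returned by VFIO_DEVICE_GET_REGION_INFO may carry a chain of
 * capabilities: cap_offset points at the first header, and each header's
 * "next" field is an offset from the start of the info buffer, with zero
 * terminating the chain (which is why the loop below stops when the
 * computed pointer wraps back to the buffer start).
 */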
struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

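/*
 * mmap() each mappable area of the region and overlay it on the I/O
 * MemoryRegion as a RAM-device subregion, so that guest accesses hit the
 * device directly instead of trapping through vfio_region_ops.  On any
 * mmap failure, all areas mapped so far are torn down again.
 */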
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

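/*
 * Register a group with the KVM VFIO pseudo device (created lazily on
 * first use, see vfio_kvm_device_fd above).  This lets KVM learn which
 * VFIO groups are in use so it can, for instance, adjust DMA coherency
 * handling for assigned devices.
 */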
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1
             * and v2, the running platform may not support v2 and there is
             * no way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

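/*
 * Attach a group to a container in the given AddressSpace: reuse an
 * existing container in that space if the kernel accepts the group into
 * it, otherwise open a fresh /dev/vfio/vfio container, negotiate an IOMMU
 * type, and register the memory listener(s) that populate its mappings.
 */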
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * Note that virtio-balloon in particular is currently only prevented
     * from discarding new memory; it does not yet use
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        return ret;
    }

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
        container->pgsizes = info.iova_pgsizes;
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    ram_block_discard_disable(false);
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

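/*
 * Look up or create the VFIOGroup for /dev/vfio/<groupid> and bind it to a
 * container in the given AddressSpace.  A group may only ever be used in a
 * single address space; the first group to appear also registers the VFIO
 * reset handler.
 */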
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        ram_block_discard_disable(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            ram_block_discard_disable(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

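/*
 * Standard VFIO argsz negotiation: ask the kernel with a minimal buffer
 * first; if it reports a larger argsz (e.g. because capability chains
 * follow the fixed fields), grow the buffer to that size and retry the
 * ioctl.
 */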
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}