hw/vfio/common.c
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"
struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
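
/*
 * Illustrative usage sketch (assumes a vfio-pci style caller and the
 * VFIO_PCI_INTX_IRQ_INDEX constant from <linux/vfio.h>): tearing down
 * INTx would look like
 *
 *     vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 *
 * DATA_NONE with ACTION_TRIGGER and count == 0 removes the trigger for
 * the whole index, while the mask/unmask helpers above act on a single
 * sub-index (count == 1, start == 0).
 */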
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
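
/*
 * Worked example of the conversion above: a 4-byte guest write of
 * 0x12345678 goes through cpu_to_le32(), so the bytes land in the region
 * file as 78 56 34 12 on any host; the le32_to_cpu() in
 * vfio_region_read() below undoes the same fixed little-endian layout.
 */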
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
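
/*
 * Note on the ops above: with both .valid and .impl spanning 1..8 bytes,
 * the memory core passes guest accesses through at their original size
 * instead of splitting or widening them, and DEVICE_LITTLE_ENDIAN matches
 * the fixed little-endian layout of the region file noted above.
 */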
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
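
/*
 * Usage sketch (hypothetical values): mapping one writable 4 KiB page of
 * guest memory at IOVA 0x100000 would be
 *
 *     ret = vfio_dma_map(container, 0x100000, 0x1000, vaddr, false);
 *
 * where vaddr is the QEMU userspace address backing that guest page.
 */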
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}
static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock().  But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel.  This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(section->mr, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}
static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}
static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}
void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}
void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
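
/*
 * Callers would typically toggle this around operations where the device
 * must not be touched through the fast path, e.g. a reset; with the mmap
 * subregions disabled, guest accesses fall back to the slow read/write
 * path through vfio_region_ops above.
 */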
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            container->iommu_type = VFIO_SPAPR_TCE_IOMMU;
            v2 = false;
            ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        }
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}
void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
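
/*
 * Usage sketch (hypothetical caller): on success *info is allocated here,
 * grown as needed to fit any capability chain the kernel reports, and
 * owned by the caller:
 *
 *     struct vfio_region_info *info;
 *
 *     if (!vfio_get_region_info(vbasedev, VFIO_PCI_CONFIG_REGION_INDEX,
 *                               &info)) {
 *         ... use info->size, info->offset, info->flags ...
 *         g_free(info);
 *     }
 */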
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}
bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}
/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}
static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}
static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}
bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}