[qemu/ar7.git] / hw/vfio/common.c
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "trace.h"
struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);
#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
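
/*
 * Notifier callback for guest IOMMU TLB updates: translate the IOTLB
 * entry the rest of the way to host RAM and mirror the resulting map
 * or unmap into the host IOMMU via the VFIO container.
 */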
static void vfio_iommu_map_notify(Notifier *n, void *data)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    IOMMUTLBEntry *iotlb = data;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->iova,
                                iotlb->iova + iotlb->addr_mask);

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        goto out;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        goto out;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iotlb->iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}
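
/*
 * Smallest IOVA page size supported by the host IOMMU; used as the
 * granularity when replaying guest IOMMU mappings into a container.
 */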
static hwaddr vfio_container_granularity(VFIOContainer *container)
{
    return (hwaddr)1 << ctz64(container->iova_pgsizes);
}
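
/*
 * MemoryListener callback: a section was added to the address space
 * the container is listening to.  RAM sections are mapped into the
 * container directly; guest IOMMU regions instead get a map notifier
 * registered and their existing mappings replayed.
 */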
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(llend);

    if ((iova < container->min_iova) || ((end - 1) > container->max_iova)) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end - 1);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end - 1);
        /*
         * FIXME: We should do some checking to see if the
         * capabilities of the host VFIO IOMMU are adequate to model
         * the guest IOMMU
         *
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n,
                                   vfio_container_granularity(container),
                                   false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end - 1, vaddr);

    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}
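
/*
 * MemoryListener callback: tear down what vfio_listener_region_add()
 * established, unregistering any guest IOMMU notifier and unmapping
 * the IOVA range from the container.
 */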
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(&giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    trace_vfio_listener_region_del(iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}
static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};
static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
}
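
/*
 * Map a device region into QEMU's address space via mmap when the
 * kernel advertises it as mmap-capable; otherwise, or on mmap failure,
 * fall back to a zero-sized sub-region so cleanup stays uniform.
 */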
int vfio_mmap_region(Object *obj, VFIORegion *region,
                     MemoryRegion *mem, MemoryRegion *submem,
                     void **map, size_t size, off_t offset,
                     const char *name)
{
    int ret = 0;
    VFIODevice *vbasedev = region->vbasedev;

    if (!vbasedev->no_mmap && size && region->flags &
        VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (region->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (region->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    vbasedev->fd,
                    region->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, obj, name, size, *map);
        memory_region_set_skip_dump(submem);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, obj, name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}
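
/*
 * System reset handler: first let every device compute whether it
 * needs a reset, then perform the hot resets in a second pass, since
 * a single multi-device reset may satisfy several devices at once.
 */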
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}
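
/*
 * Register a group with the KVM VFIO pseudo device, creating that
 * device on first use, so the kernel can account for VFIO usage in
 * this VM.  Failures are reported but treated as non-fatal.
 */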
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}
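
/*
 * Look up the VFIOAddressSpace wrapping @as, creating and enlisting
 * a new one on first use.
 */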
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}
static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}
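
/*
 * Attach @group to a container in the given address space: reuse an
 * existing container if the kernel accepts the group, otherwise open
 * a new one, negotiate an IOMMU model (Type1/Type1v2 or sPAPR TCE)
 * and register the memory listener that populates it.
 */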
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU,
                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        container->min_iova = 0;
        container->max_iova = (hwaddr)-1;

        /* Assume just 4K IOVA page size */
        container->iova_pgsizes = 0x1000;
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            container->iova_pgsizes = info.iova_pgsizes;
        }
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }
        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_report("vfio: failed to enable container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * This only considers the host IOMMU's 32-bit window.  At
         * some point we need to add support for the optional 64-bit
         * window and dynamic windows
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
            ret = -errno;
            goto free_container_exit;
        }
        container->min_iova = info.dma32_window_start;
        container->max_iova = container->min_iova + info.dma32_window_size - 1;

        /* Assume just 4K IOVA pages for now */
        container->iova_pgsizes = 0x1000;
    } else {
        error_report("vfio: No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_report("vfio: memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;

listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
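
/*
 * Detach @group from its container, releasing the container itself
 * (listener, guest IOMMU notifiers, fd) once the last group is gone.
 */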
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(&giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
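
/*
 * Return the VFIOGroup for @groupid, opening /dev/vfio/<groupid> and
 * connecting it to a container on first use.  A group may only be
 * used in a single address space.
 */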
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_report("vfio: group %d used in multiple address spaces",
                             group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
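
/*
 * Obtain a device file descriptor for @name from @group and fill in
 * the common VFIODevice fields from VFIO_DEVICE_GET_INFO.
 */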
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-<bus> "
                     "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}
void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
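
/*
 * Resolve @groupid to its container and forward @req to the container
 * fd, dropping the group reference before returning.
 */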
static int vfio_container_do_ioctl(AddressSpace *as, int32_t groupid,
                                   int req, void *param)
{
    VFIOGroup *group;
    VFIOContainer *container;
    int ret = -1;

    group = vfio_get_group(groupid, as);
    if (!group) {
        error_report("vfio: group %d not registered", groupid);
        return ret;
    }

    container = group->container;
    if (group->container) {
        ret = ioctl(container->fd, req, param);
        if (ret < 0) {
            error_report("vfio: failed to ioctl %d to container: ret=%d, %s",
                         _IOC_NR(req) - VFIO_BASE, ret, strerror(errno));
        }
    }

    vfio_put_group(group);

    return ret;
}
int vfio_container_ioctl(AddressSpace *as, int32_t groupid,
                         int req, void *param)
{
    /* We allow only certain ioctls to the container */
    switch (req) {
    case VFIO_CHECK_EXTENSION:
    case VFIO_IOMMU_SPAPR_TCE_GET_INFO:
    case VFIO_EEH_PE_OP:
        break;
    default:
        /* Return an error on unknown requests */
        error_report("vfio: unsupported ioctl %X", req);
        return -1;
    }

    return vfio_container_do_ioctl(as, groupid, req, param);
}