target/mips: Prefer fast cpu_env() over slower CPU QOM cast macro
[qemu/ar7.git] / backends / iommufd.c
blob62a79fa6b04996054a4a22c8118efc87d83d6b09
1 /*
2 * iommufd container backend
4 * Copyright (C) 2023 Intel Corporation.
5 * Copyright Red Hat, Inc. 2023
7 * Authors: Yi Liu <yi.l.liu@intel.com>
8 * Eric Auger <eric.auger@redhat.com>
10 * SPDX-License-Identifier: GPL-2.0-or-later
13 #include "qemu/osdep.h"
14 #include "sysemu/iommufd.h"
15 #include "qapi/error.h"
16 #include "qapi/qmp/qerror.h"
17 #include "qemu/module.h"
18 #include "qom/object_interfaces.h"
19 #include "qemu/error-report.h"
20 #include "monitor/monitor.h"
21 #include "trace.h"
22 #include <sys/ioctl.h>
23 #include <linux/iommufd.h>
25 static void iommufd_backend_init(Object *obj)
27 IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
29 be->fd = -1;
30 be->users = 0;
31 be->owned = true;
34 static void iommufd_backend_finalize(Object *obj)
36 IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
38 if (be->owned) {
39 close(be->fd);
40 be->fd = -1;
44 static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp)
46 ERRP_GUARD();
47 IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
48 int fd = -1;
50 fd = monitor_fd_param(monitor_cur(), str, errp);
51 if (fd == -1) {
52 error_prepend(errp, "Could not parse remote object fd %s:", str);
53 return;
55 be->fd = fd;
56 be->owned = false;
57 trace_iommu_backend_set_fd(be->fd);
60 static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
62 IOMMUFDBackend *be = IOMMUFD_BACKEND(uc);
64 return !be->users;
67 static void iommufd_backend_class_init(ObjectClass *oc, void *data)
69 UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
71 ucc->can_be_deleted = iommufd_backend_can_be_deleted;
73 object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
76 int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
78 int fd, ret = 0;
80 if (be->owned && !be->users) {
81 fd = qemu_open_old("/dev/iommu", O_RDWR);
82 if (fd < 0) {
83 error_setg_errno(errp, errno, "/dev/iommu opening failed");
84 ret = fd;
85 goto out;
87 be->fd = fd;
89 be->users++;
90 out:
91 trace_iommufd_backend_connect(be->fd, be->owned,
92 be->users, ret);
93 return ret;
96 void iommufd_backend_disconnect(IOMMUFDBackend *be)
98 if (!be->users) {
99 goto out;
101 be->users--;
102 if (!be->users && be->owned) {
103 close(be->fd);
104 be->fd = -1;
106 out:
107 trace_iommufd_backend_disconnect(be->fd, be->users);
110 int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
111 Error **errp)
113 int ret, fd = be->fd;
114 struct iommu_ioas_alloc alloc_data = {
115 .size = sizeof(alloc_data),
116 .flags = 0,
119 ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data);
120 if (ret) {
121 error_setg_errno(errp, errno, "Failed to allocate ioas");
122 return ret;
125 *ioas_id = alloc_data.out_ioas_id;
126 trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret);
128 return ret;
/*
 * Destroy an iommufd object (IOAS, HWPT, ...) by id via IOMMU_DESTROY.
 * Failure is only reported (not returned) — callers cannot act on it.
 */
void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
{
    int ret, fd = be->fd;
    struct iommu_destroy des = {
        .size = sizeof(des),
        .id = id,
    };

    ret = ioctl(fd, IOMMU_DESTROY, &des);
    trace_iommufd_backend_free_id(fd, id, ret);
    if (ret) {
        /*
         * %m formats errno from the failed ioctl.
         * NOTE(review): this assumes the trace call above preserves
         * errno — confirm tracing backends don't clobber it.
         */
        error_report("Failed to free id: %u %m", id);
    }
}
/*
 * Map a host virtual address range into the IOAS at a fixed IOVA.
 *
 * @ioas_id:  target I/O address space
 * @iova:     guest IOVA where the mapping is placed (FIXED_IOVA)
 * @size:     length of the mapping in bytes
 * @vaddr:    host virtual address backing the range
 * @readonly: if true, the mapping is not writable by the device
 *
 * Returns 0 on success, -errno on failure.
 */
int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
                            ram_addr_t size, void *vaddr, bool readonly)
{
    int ret, fd = be->fd;
    struct iommu_ioas_map map = {
        .size = sizeof(map),
        .flags = IOMMU_IOAS_MAP_READABLE |
                 IOMMU_IOAS_MAP_FIXED_IOVA,
        .ioas_id = ioas_id,
        .__reserved = 0,
        .user_va = (uintptr_t)vaddr,
        .iova = iova,
        .length = size,
    };

    /* Writability is opt-out: add WRITEABLE unless caller asked readonly. */
    if (!readonly) {
        map.flags |= IOMMU_IOAS_MAP_WRITEABLE;
    }

    ret = ioctl(fd, IOMMU_IOAS_MAP, &map);
    /*
     * NOTE(review): the errno reads below (and %m) assume the trace call
     * does not clobber errno between ioctl() and here — confirm.
     */
    trace_iommufd_backend_map_dma(fd, ioas_id, iova, size,
                                  vaddr, readonly, ret);
    if (ret) {
        ret = -errno;

        /* TODO: Not support mapping hardware PCI BAR region for now. */
        if (errno == EFAULT) {
            /* EFAULT likely means vaddr is not plain RAM (e.g. a BAR). */
            warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
        } else {
            error_report("IOMMU_IOAS_MAP failed: %m");
        }
    }
    return ret;
}
/*
 * Unmap an IOVA range from the IOAS.
 *
 * Returns 0 on success (including the nonexistent-mapping case below),
 * -errno on failure.
 */
int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
                              hwaddr iova, ram_addr_t size)
{
    int ret, fd = be->fd;
    struct iommu_ioas_unmap unmap = {
        .size = sizeof(unmap),
        .ioas_id = ioas_id,
        .iova = iova,
        .length = size,
    };

    ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap);
    /*
     * IOMMUFD takes mapping as some kind of object, unmapping
     * nonexistent mapping is treated as deleting a nonexistent
     * object and return ENOENT. This is different from legacy
     * backend which allows it. vIOMMU may trigger a lot of
     * redundant unmapping, to avoid flush the log, treat them
     * as success for IOMMUFD just like legacy backend.
     */
    if (ret && errno == ENOENT) {
        trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret);
        ret = 0;    /* swallow ENOENT: match legacy backend behavior */
    } else {
        trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret);
    }

    if (ret) {
        /*
         * NOTE(review): %m and this errno read assume the trace call
         * above preserved errno from the failed ioctl — confirm.
         */
        ret = -errno;
        error_report("IOMMU_IOAS_UNMAP failed: %m");
    }
    return ret;
}
/*
 * QOM type registration table for the iommufd backend object.
 * Implements TYPE_USER_CREATABLE so it can be created with -object.
 */
static const TypeInfo iommufd_backend_info = {
    .name = TYPE_IOMMUFD_BACKEND,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(IOMMUFDBackend),
    .instance_init = iommufd_backend_init,
    .instance_finalize = iommufd_backend_finalize,
    .class_size = sizeof(IOMMUFDBackendClass),
    .class_init = iommufd_backend_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }     /* list terminator */
    }
};
/* Register the backend type with QOM at module init time. */
static void register_types(void)
{
    type_register_static(&iommufd_backend_info);
}

type_init(register_types);