virtio-pci: add virtio_pci_optimal_num_queues() helper
[qemu/ar7.git] / hw / virtio / vhost-backend.c
blob782b1d67d9a5696ef4a97c8dc1a8855fc386c338
1 /*
2 * vhost-backend
4 * Copyright (c) 2013 Virtual Open Systems Sarl.
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
9 */
11 #include "qemu/osdep.h"
12 #include "hw/virtio/vhost.h"
13 #include "hw/virtio/vhost-backend.h"
14 #include "qemu/error-report.h"
15 #include "qemu/main-loop.h"
16 #include "standard-headers/linux/vhost_types.h"
18 #include "hw/virtio/vhost-vdpa.h"
19 #ifdef CONFIG_VHOST_KERNEL
20 #include <linux/vhost.h>
21 #include <sys/ioctl.h>
23 static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
24 void *arg)
26 int fd = (uintptr_t) dev->opaque;
28 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
30 return ioctl(fd, request, arg);
33 static int vhost_kernel_init(struct vhost_dev *dev, void *opaque)
35 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
37 dev->opaque = opaque;
39 return 0;
42 static int vhost_kernel_cleanup(struct vhost_dev *dev)
44 int fd = (uintptr_t) dev->opaque;
46 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
48 return close(fd);
51 static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
53 int limit = 64;
54 char *s;
56 if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
57 &s, NULL, NULL)) {
58 uint64_t val = g_ascii_strtoull(s, NULL, 10);
59 if (!((val == G_MAXUINT64 || !val) && errno)) {
60 g_free(s);
61 return val;
63 error_report("ignoring invalid max_mem_regions value in vhost module:"
64 " %s", s);
66 g_free(s);
67 return limit;
70 static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
71 struct vhost_vring_file *file)
73 return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
76 static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
77 struct vhost_scsi_target *target)
79 return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
82 static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
83 struct vhost_scsi_target *target)
85 return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
88 static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
90 return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
93 static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
94 struct vhost_log *log)
96 return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
99 static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
100 struct vhost_memory *mem)
102 return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
105 static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
106 struct vhost_vring_addr *addr)
108 return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
111 static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
112 struct vhost_vring_state *ring)
114 return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
117 static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
118 struct vhost_vring_state *ring)
120 return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
123 static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
124 struct vhost_vring_state *ring)
126 return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
129 static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
130 struct vhost_vring_state *ring)
132 return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
135 static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
136 struct vhost_vring_file *file)
138 return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
141 static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
142 struct vhost_vring_file *file)
144 return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
147 static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
148 struct vhost_vring_state *s)
150 return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
153 static int vhost_kernel_set_features(struct vhost_dev *dev,
154 uint64_t features)
156 return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
159 static int vhost_kernel_get_features(struct vhost_dev *dev,
160 uint64_t *features)
162 return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
165 static int vhost_kernel_set_owner(struct vhost_dev *dev)
167 return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
170 static int vhost_kernel_reset_device(struct vhost_dev *dev)
172 return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
175 static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
177 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
179 return idx - dev->vq_index;
#ifdef CONFIG_VHOST_VSOCK
/* Thin wrapper around the VHOST_VSOCK_SET_GUEST_CID ioctl. */
static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

/* Start/stop the vsock device via the VHOST_VSOCK_SET_RUNNING ioctl. */
static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}
#endif /* CONFIG_VHOST_VSOCK */
195 static void vhost_kernel_iotlb_read(void *opaque)
197 struct vhost_dev *dev = opaque;
198 struct vhost_msg msg;
199 ssize_t len;
201 while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
202 if (len < sizeof msg) {
203 error_report("Wrong vhost message len: %d", (int)len);
204 break;
206 if (msg.type != VHOST_IOTLB_MSG) {
207 error_report("Unknown vhost iotlb message type");
208 break;
211 vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
215 static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev,
216 struct vhost_iotlb_msg *imsg)
218 struct vhost_msg msg;
220 msg.type = VHOST_IOTLB_MSG;
221 msg.iotlb = *imsg;
223 if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
224 error_report("Fail to update device iotlb");
225 return -EFAULT;
228 return 0;
231 static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
232 int enabled)
234 if (enabled)
235 qemu_set_fd_handler((uintptr_t)dev->opaque,
236 vhost_kernel_iotlb_read, NULL, dev);
237 else
238 qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
241 static const VhostOps kernel_ops = {
242 .backend_type = VHOST_BACKEND_TYPE_KERNEL,
243 .vhost_backend_init = vhost_kernel_init,
244 .vhost_backend_cleanup = vhost_kernel_cleanup,
245 .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
246 .vhost_net_set_backend = vhost_kernel_net_set_backend,
247 .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
248 .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
249 .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
250 .vhost_set_log_base = vhost_kernel_set_log_base,
251 .vhost_set_mem_table = vhost_kernel_set_mem_table,
252 .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
253 .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
254 .vhost_set_vring_num = vhost_kernel_set_vring_num,
255 .vhost_set_vring_base = vhost_kernel_set_vring_base,
256 .vhost_get_vring_base = vhost_kernel_get_vring_base,
257 .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
258 .vhost_set_vring_call = vhost_kernel_set_vring_call,
259 .vhost_set_vring_busyloop_timeout =
260 vhost_kernel_set_vring_busyloop_timeout,
261 .vhost_set_features = vhost_kernel_set_features,
262 .vhost_get_features = vhost_kernel_get_features,
263 .vhost_set_owner = vhost_kernel_set_owner,
264 .vhost_reset_device = vhost_kernel_reset_device,
265 .vhost_get_vq_index = vhost_kernel_get_vq_index,
266 #ifdef CONFIG_VHOST_VSOCK
267 .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
268 .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
269 #endif /* CONFIG_VHOST_VSOCK */
270 .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
271 .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
273 #endif
275 int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
277 int r = 0;
279 switch (backend_type) {
280 #ifdef CONFIG_VHOST_KERNEL
281 case VHOST_BACKEND_TYPE_KERNEL:
282 dev->vhost_ops = &kernel_ops;
283 break;
284 #endif
285 #ifdef CONFIG_VHOST_USER
286 case VHOST_BACKEND_TYPE_USER:
287 dev->vhost_ops = &user_ops;
288 break;
289 #endif
290 #ifdef CONFIG_VHOST_VDPA
291 case VHOST_BACKEND_TYPE_VDPA:
292 dev->vhost_ops = &vdpa_ops;
293 break;
294 #endif
295 default:
296 error_report("Unknown vhost backend type");
297 r = -1;
300 return r;
303 int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
304 uint64_t iova, uint64_t uaddr,
305 uint64_t len,
306 IOMMUAccessFlags perm)
308 struct vhost_iotlb_msg imsg;
310 imsg.iova = iova;
311 imsg.uaddr = uaddr;
312 imsg.size = len;
313 imsg.type = VHOST_IOTLB_UPDATE;
315 switch (perm) {
316 case IOMMU_RO:
317 imsg.perm = VHOST_ACCESS_RO;
318 break;
319 case IOMMU_WO:
320 imsg.perm = VHOST_ACCESS_WO;
321 break;
322 case IOMMU_RW:
323 imsg.perm = VHOST_ACCESS_RW;
324 break;
325 default:
326 return -EINVAL;
329 if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
330 return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
332 return -ENODEV;
335 int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
336 uint64_t iova, uint64_t len)
338 struct vhost_iotlb_msg imsg;
340 imsg.iova = iova;
341 imsg.size = len;
342 imsg.type = VHOST_IOTLB_INVALIDATE;
344 if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
345 return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
347 return -ENODEV;
350 int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
351 struct vhost_iotlb_msg *imsg)
353 int ret = 0;
355 switch (imsg->type) {
356 case VHOST_IOTLB_MISS:
357 ret = vhost_device_iotlb_miss(dev, imsg->iova,
358 imsg->perm != VHOST_ACCESS_RO);
359 break;
360 case VHOST_IOTLB_ACCESS_FAIL:
361 /* FIXME: report device iotlb error */
362 error_report("Access failure IOTLB message type not supported");
363 ret = -ENOTSUP;
364 break;
365 case VHOST_IOTLB_UPDATE:
366 case VHOST_IOTLB_INVALIDATE:
367 default:
368 error_report("Unexpected IOTLB message type");
369 ret = -EINVAL;
370 break;
373 return ret;