 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
11 #include "qemu/osdep.h"
12 #include "hw/virtio/vhost.h"
13 #include "hw/virtio/vhost-backend.h"
14 #include "qemu/error-report.h"
15 #include "qemu/main-loop.h"
16 #include "standard-headers/linux/vhost_types.h"
18 #include "hw/virtio/vhost-vdpa.h"
19 #ifdef CONFIG_VHOST_KERNEL
20 #include <linux/vhost.h>
21 #include <sys/ioctl.h>
23 static int vhost_kernel_call(struct vhost_dev
*dev
, unsigned long int request
,
26 int fd
= (uintptr_t) dev
->opaque
;
29 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_KERNEL
);
31 ret
= ioctl(fd
, request
, arg
);
32 return ret
< 0 ? -errno
: ret
;
35 static int vhost_kernel_init(struct vhost_dev
*dev
, void *opaque
, Error
**errp
)
37 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_KERNEL
);
44 static int vhost_kernel_cleanup(struct vhost_dev
*dev
)
46 int fd
= (uintptr_t) dev
->opaque
;
48 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_KERNEL
);
50 return close(fd
) < 0 ? -errno
: 0;
53 static int vhost_kernel_memslots_limit(struct vhost_dev
*dev
)
58 if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
60 uint64_t val
= g_ascii_strtoull(s
, NULL
, 10);
61 if (val
< INT_MAX
&& val
> 0) {
65 error_report("ignoring invalid max_mem_regions value in vhost module:"
72 static int vhost_kernel_net_set_backend(struct vhost_dev
*dev
,
73 struct vhost_vring_file
*file
)
75 return vhost_kernel_call(dev
, VHOST_NET_SET_BACKEND
, file
);
78 static int vhost_kernel_scsi_set_endpoint(struct vhost_dev
*dev
,
79 struct vhost_scsi_target
*target
)
81 return vhost_kernel_call(dev
, VHOST_SCSI_SET_ENDPOINT
, target
);
84 static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev
*dev
,
85 struct vhost_scsi_target
*target
)
87 return vhost_kernel_call(dev
, VHOST_SCSI_CLEAR_ENDPOINT
, target
);
90 static int vhost_kernel_scsi_get_abi_version(struct vhost_dev
*dev
, int *version
)
92 return vhost_kernel_call(dev
, VHOST_SCSI_GET_ABI_VERSION
, version
);
95 static int vhost_kernel_set_log_base(struct vhost_dev
*dev
, uint64_t base
,
96 struct vhost_log
*log
)
98 return vhost_kernel_call(dev
, VHOST_SET_LOG_BASE
, &base
);
101 static int vhost_kernel_set_mem_table(struct vhost_dev
*dev
,
102 struct vhost_memory
*mem
)
104 return vhost_kernel_call(dev
, VHOST_SET_MEM_TABLE
, mem
);
107 static int vhost_kernel_set_vring_addr(struct vhost_dev
*dev
,
108 struct vhost_vring_addr
*addr
)
110 return vhost_kernel_call(dev
, VHOST_SET_VRING_ADDR
, addr
);
113 static int vhost_kernel_set_vring_endian(struct vhost_dev
*dev
,
114 struct vhost_vring_state
*ring
)
116 return vhost_kernel_call(dev
, VHOST_SET_VRING_ENDIAN
, ring
);
119 static int vhost_kernel_set_vring_num(struct vhost_dev
*dev
,
120 struct vhost_vring_state
*ring
)
122 return vhost_kernel_call(dev
, VHOST_SET_VRING_NUM
, ring
);
125 static int vhost_kernel_set_vring_base(struct vhost_dev
*dev
,
126 struct vhost_vring_state
*ring
)
128 return vhost_kernel_call(dev
, VHOST_SET_VRING_BASE
, ring
);
131 static int vhost_kernel_get_vring_base(struct vhost_dev
*dev
,
132 struct vhost_vring_state
*ring
)
134 return vhost_kernel_call(dev
, VHOST_GET_VRING_BASE
, ring
);
137 static int vhost_kernel_set_vring_kick(struct vhost_dev
*dev
,
138 struct vhost_vring_file
*file
)
140 return vhost_kernel_call(dev
, VHOST_SET_VRING_KICK
, file
);
143 static int vhost_kernel_set_vring_call(struct vhost_dev
*dev
,
144 struct vhost_vring_file
*file
)
146 return vhost_kernel_call(dev
, VHOST_SET_VRING_CALL
, file
);
149 static int vhost_kernel_set_vring_err(struct vhost_dev
*dev
,
150 struct vhost_vring_file
*file
)
152 return vhost_kernel_call(dev
, VHOST_SET_VRING_ERR
, file
);
155 static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev
*dev
,
156 struct vhost_vring_state
*s
)
158 return vhost_kernel_call(dev
, VHOST_SET_VRING_BUSYLOOP_TIMEOUT
, s
);
161 static int vhost_kernel_set_features(struct vhost_dev
*dev
,
164 return vhost_kernel_call(dev
, VHOST_SET_FEATURES
, &features
);
167 static int vhost_kernel_set_backend_cap(struct vhost_dev
*dev
)
170 uint64_t f
= 0x1ULL
<< VHOST_BACKEND_F_IOTLB_MSG_V2
;
173 if (vhost_kernel_call(dev
, VHOST_GET_BACKEND_FEATURES
, &features
)) {
178 r
= vhost_kernel_call(dev
, VHOST_SET_BACKEND_FEATURES
,
184 dev
->backend_cap
= features
;
189 static int vhost_kernel_get_features(struct vhost_dev
*dev
,
192 return vhost_kernel_call(dev
, VHOST_GET_FEATURES
, features
);
195 static int vhost_kernel_set_owner(struct vhost_dev
*dev
)
197 return vhost_kernel_call(dev
, VHOST_SET_OWNER
, NULL
);
200 static int vhost_kernel_reset_device(struct vhost_dev
*dev
)
202 return vhost_kernel_call(dev
, VHOST_RESET_OWNER
, NULL
);
205 static int vhost_kernel_get_vq_index(struct vhost_dev
*dev
, int idx
)
207 assert(idx
>= dev
->vq_index
&& idx
< dev
->vq_index
+ dev
->nvqs
);
209 return idx
- dev
->vq_index
;
212 static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev
*dev
,
215 return vhost_kernel_call(dev
, VHOST_VSOCK_SET_GUEST_CID
, &guest_cid
);
218 static int vhost_kernel_vsock_set_running(struct vhost_dev
*dev
, int start
)
220 return vhost_kernel_call(dev
, VHOST_VSOCK_SET_RUNNING
, &start
);
223 static void vhost_kernel_iotlb_read(void *opaque
)
225 struct vhost_dev
*dev
= opaque
;
228 if (dev
->backend_cap
&
229 (0x1ULL
<< VHOST_BACKEND_F_IOTLB_MSG_V2
)) {
230 struct vhost_msg_v2 msg
;
232 while ((len
= read((uintptr_t)dev
->opaque
, &msg
, sizeof msg
)) > 0) {
233 if (len
< sizeof msg
) {
234 error_report("Wrong vhost message len: %d", (int)len
);
237 if (msg
.type
!= VHOST_IOTLB_MSG_V2
) {
238 error_report("Unknown vhost iotlb message type");
242 vhost_backend_handle_iotlb_msg(dev
, &msg
.iotlb
);
245 struct vhost_msg msg
;
247 while ((len
= read((uintptr_t)dev
->opaque
, &msg
, sizeof msg
)) > 0) {
248 if (len
< sizeof msg
) {
249 error_report("Wrong vhost message len: %d", (int)len
);
252 if (msg
.type
!= VHOST_IOTLB_MSG
) {
253 error_report("Unknown vhost iotlb message type");
257 vhost_backend_handle_iotlb_msg(dev
, &msg
.iotlb
);
262 static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev
*dev
,
263 struct vhost_iotlb_msg
*imsg
)
265 if (dev
->backend_cap
& (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2
)) {
266 struct vhost_msg_v2 msg
= {};
268 msg
.type
= VHOST_IOTLB_MSG_V2
;
271 if (write((uintptr_t)dev
->opaque
, &msg
, sizeof msg
) != sizeof msg
) {
272 error_report("Fail to update device iotlb");
276 struct vhost_msg msg
= {};
278 msg
.type
= VHOST_IOTLB_MSG
;
281 if (write((uintptr_t)dev
->opaque
, &msg
, sizeof msg
) != sizeof msg
) {
282 error_report("Fail to update device iotlb");
290 static void vhost_kernel_set_iotlb_callback(struct vhost_dev
*dev
,
294 qemu_set_fd_handler((uintptr_t)dev
->opaque
,
295 vhost_kernel_iotlb_read
, NULL
, dev
);
297 qemu_set_fd_handler((uintptr_t)dev
->opaque
, NULL
, NULL
, NULL
);
300 const VhostOps kernel_ops
= {
301 .backend_type
= VHOST_BACKEND_TYPE_KERNEL
,
302 .vhost_backend_init
= vhost_kernel_init
,
303 .vhost_backend_cleanup
= vhost_kernel_cleanup
,
304 .vhost_backend_memslots_limit
= vhost_kernel_memslots_limit
,
305 .vhost_net_set_backend
= vhost_kernel_net_set_backend
,
306 .vhost_scsi_set_endpoint
= vhost_kernel_scsi_set_endpoint
,
307 .vhost_scsi_clear_endpoint
= vhost_kernel_scsi_clear_endpoint
,
308 .vhost_scsi_get_abi_version
= vhost_kernel_scsi_get_abi_version
,
309 .vhost_set_log_base
= vhost_kernel_set_log_base
,
310 .vhost_set_mem_table
= vhost_kernel_set_mem_table
,
311 .vhost_set_vring_addr
= vhost_kernel_set_vring_addr
,
312 .vhost_set_vring_endian
= vhost_kernel_set_vring_endian
,
313 .vhost_set_vring_num
= vhost_kernel_set_vring_num
,
314 .vhost_set_vring_base
= vhost_kernel_set_vring_base
,
315 .vhost_get_vring_base
= vhost_kernel_get_vring_base
,
316 .vhost_set_vring_kick
= vhost_kernel_set_vring_kick
,
317 .vhost_set_vring_call
= vhost_kernel_set_vring_call
,
318 .vhost_set_vring_err
= vhost_kernel_set_vring_err
,
319 .vhost_set_vring_busyloop_timeout
=
320 vhost_kernel_set_vring_busyloop_timeout
,
321 .vhost_set_features
= vhost_kernel_set_features
,
322 .vhost_get_features
= vhost_kernel_get_features
,
323 .vhost_set_backend_cap
= vhost_kernel_set_backend_cap
,
324 .vhost_set_owner
= vhost_kernel_set_owner
,
325 .vhost_reset_device
= vhost_kernel_reset_device
,
326 .vhost_get_vq_index
= vhost_kernel_get_vq_index
,
327 .vhost_vsock_set_guest_cid
= vhost_kernel_vsock_set_guest_cid
,
328 .vhost_vsock_set_running
= vhost_kernel_vsock_set_running
,
329 .vhost_set_iotlb_callback
= vhost_kernel_set_iotlb_callback
,
330 .vhost_send_device_iotlb_msg
= vhost_kernel_send_device_iotlb_msg
,
334 int vhost_backend_update_device_iotlb(struct vhost_dev
*dev
,
335 uint64_t iova
, uint64_t uaddr
,
337 IOMMUAccessFlags perm
)
339 struct vhost_iotlb_msg imsg
;
344 imsg
.type
= VHOST_IOTLB_UPDATE
;
348 imsg
.perm
= VHOST_ACCESS_RO
;
351 imsg
.perm
= VHOST_ACCESS_WO
;
354 imsg
.perm
= VHOST_ACCESS_RW
;
360 if (dev
->vhost_ops
&& dev
->vhost_ops
->vhost_send_device_iotlb_msg
)
361 return dev
->vhost_ops
->vhost_send_device_iotlb_msg(dev
, &imsg
);
366 int vhost_backend_invalidate_device_iotlb(struct vhost_dev
*dev
,
367 uint64_t iova
, uint64_t len
)
369 struct vhost_iotlb_msg imsg
;
373 imsg
.type
= VHOST_IOTLB_INVALIDATE
;
375 if (dev
->vhost_ops
&& dev
->vhost_ops
->vhost_send_device_iotlb_msg
)
376 return dev
->vhost_ops
->vhost_send_device_iotlb_msg(dev
, &imsg
);
381 int vhost_backend_handle_iotlb_msg(struct vhost_dev
*dev
,
382 struct vhost_iotlb_msg
*imsg
)
386 if (unlikely(!dev
->vdev
)) {
387 error_report("Unexpected IOTLB message when virtio device is stopped");
391 switch (imsg
->type
) {
392 case VHOST_IOTLB_MISS
:
393 ret
= vhost_device_iotlb_miss(dev
, imsg
->iova
,
394 imsg
->perm
!= VHOST_ACCESS_RO
);
396 case VHOST_IOTLB_ACCESS_FAIL
:
397 /* FIXME: report device iotlb error */
398 error_report("Access failure IOTLB message type not supported");
401 case VHOST_IOTLB_UPDATE
:
402 case VHOST_IOTLB_INVALIDATE
:
404 error_report("Unexpected IOTLB message type");