/*
 * vhost-backend
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "standard-headers/linux/vhost_types.h"

#include "hw/virtio/vhost-vdpa.h"
#ifdef CONFIG_VHOST_KERNEL
#include <linux/vhost.h>
#include <sys/ioctl.h>

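/*
 * Generic ioctl dispatcher for the kernel vhost backend: dev->opaque carries
 * the vhost device file descriptor (stored as an integer in the pointer),
 * and failures are reported back as -errno.
 */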
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    int fd = (uintptr_t) dev->opaque;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_kernel_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    dev->opaque = opaque;

    return 0;
}

static int vhost_kernel_cleanup(struct vhost_dev *dev)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return close(fd) < 0 ? -errno : 0;
}

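/*
 * Read the vhost module's max_mem_regions parameter from sysfs; a missing
 * or unparsable value falls back to the default limit.
 */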
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
{
    int limit = 64;
    char *s;

    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                            &s, NULL, NULL)) {
        uint64_t val = g_ascii_strtoull(s, NULL, 10);
        if (!((val == G_MAXUINT64 || !val) && errno)) {
            g_free(s);
            return val;
        }
        error_report("ignoring invalid max_mem_regions value in vhost module:"
                     " %s", s);
        g_free(s);
    }

    return limit;
}

static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
}

static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
                                          struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
}

static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
                                            struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
}

static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
{
    return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
}

static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
                                      struct vhost_memory *mem)
{
    return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
}

static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
}

static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
}

static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                   struct vhost_vring_state *s)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}

static int vhost_kernel_set_features(struct vhost_dev *dev,
                                     uint64_t features)
{
    return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
}

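/*
 * Negotiate backend features: offer only VHOST_BACKEND_F_IOTLB_MSG_V2 to the
 * kernel and cache the accepted set in dev->backend_cap.
 */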
static int vhost_kernel_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
    int r;

    if (vhost_kernel_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_kernel_call(dev, VHOST_SET_BACKEND_FEATURES,
                          &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_kernel_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
}

static int vhost_kernel_set_owner(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}

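/* Map a device-global virtqueue index to this vhost device's 0-based index. */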
static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx - dev->vq_index;
}

#ifdef CONFIG_VHOST_VSOCK
static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}
#endif /* CONFIG_VHOST_VSOCK */

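/*
 * fd read handler: drain IOTLB messages coming from the kernel. The wire
 * format (vhost_msg_v2 vs. vhost_msg) depends on whether
 * VHOST_BACKEND_F_IOTLB_MSG_V2 was negotiated.
 */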
static void vhost_kernel_iotlb_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    ssize_t len;

    if (dev->backend_cap &
        (0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG_V2) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    } else {
        struct vhost_msg msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    }
}

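/*
 * Push an IOTLB message to the kernel over the vhost fd, again choosing the
 * v2 or legacy message layout based on dev->backend_cap.
 */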
static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev,
                                              struct vhost_iotlb_msg *imsg)
{
    if (dev->backend_cap & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg = {};

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Fail to update device iotlb");
            return -EFAULT;
        }
    } else {
        struct vhost_msg msg = {};

        msg.type = VHOST_IOTLB_MSG;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Fail to update device iotlb");
            return -EFAULT;
        }
    }

    return 0;
}

static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
                                            int enabled)
{
    if (enabled) {
        qemu_set_fd_handler((uintptr_t)dev->opaque,
                            vhost_kernel_iotlb_read, NULL, dev);
    } else {
        qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
    }
}

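/* Dispatch table wiring the generic vhost core to the kernel ioctl backend. */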
const VhostOps kernel_ops = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .vhost_backend_init = vhost_kernel_init,
        .vhost_backend_cleanup = vhost_kernel_cleanup,
        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
        .vhost_net_set_backend = vhost_kernel_net_set_backend,
        .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
        .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
        .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
        .vhost_set_log_base = vhost_kernel_set_log_base,
        .vhost_set_mem_table = vhost_kernel_set_mem_table,
        .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
        .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
        .vhost_set_vring_num = vhost_kernel_set_vring_num,
        .vhost_set_vring_base = vhost_kernel_set_vring_base,
        .vhost_get_vring_base = vhost_kernel_get_vring_base,
        .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
        .vhost_set_vring_call = vhost_kernel_set_vring_call,
        .vhost_set_vring_busyloop_timeout =
                                vhost_kernel_set_vring_busyloop_timeout,
        .vhost_set_features = vhost_kernel_set_features,
        .vhost_get_features = vhost_kernel_get_features,
        .vhost_set_backend_cap = vhost_kernel_set_backend_cap,
        .vhost_set_owner = vhost_kernel_set_owner,
        .vhost_reset_device = vhost_kernel_reset_device,
        .vhost_get_vq_index = vhost_kernel_get_vq_index,
#ifdef CONFIG_VHOST_VSOCK
        .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
        .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
#endif /* CONFIG_VHOST_VSOCK */
        .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
};
#endif

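/*
 * Backend-independent helpers follow: they build vhost_iotlb_msg requests
 * from IOMMU events and forward them through the backend's
 * vhost_send_device_iotlb_msg hook.
 */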
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                      uint64_t iova, uint64_t uaddr,
                                      uint64_t len,
                                      IOMMUAccessFlags perm)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.uaddr = uaddr;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_UPDATE;

    switch (perm) {
    case IOMMU_RO:
        imsg.perm = VHOST_ACCESS_RO;
        break;
    case IOMMU_WO:
        imsg.perm = VHOST_ACCESS_WO;
        break;
    case IOMMU_RW:
        imsg.perm = VHOST_ACCESS_RW;
        break;
    default:
        return -EINVAL;
    }

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);

    return -ENODEV;
}

int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
                                          uint64_t iova, uint64_t len)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_INVALIDATE;

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);

    return -ENODEV;
}

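/*
 * Handle an IOTLB message received from the backend; only IOTLB misses are
 * serviced here, by resolving the mapping via vhost_device_iotlb_miss().
 */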
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *imsg)
{
    int ret = 0;

    if (unlikely(!dev->vdev)) {
        error_report("Unexpected IOTLB message when virtio device is stopped");
        return -EINVAL;
    }

    switch (imsg->type) {
    case VHOST_IOTLB_MISS:
        ret = vhost_device_iotlb_miss(dev, imsg->iova,
                                      imsg->perm != VHOST_ACCESS_RO);
        break;
    case VHOST_IOTLB_ACCESS_FAIL:
        /* FIXME: report device iotlb error */
        error_report("Access failure IOTLB message type not supported");
        ret = -ENOTSUP;
        break;
    case VHOST_IOTLB_UPDATE:
    case VHOST_IOTLB_INVALIDATE:
    default:
        error_report("Unexpected IOTLB message type");