/*
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Victor Kaplansky <victork@redhat.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */
14 #ifndef LIBVHOST_USER_H
15 #define LIBVHOST_USER_H
21 #include <linux/vhost.h>
22 #include "standard-headers/linux/virtio_ring.h"
/* Based on qemu/hw/virtio/vhost-user.c */

/* Feature bit advertising support for the protocol-features handshake. */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* Granularity of the dirty log, in bytes. */
#define VHOST_LOG_PAGE 4096

/* Maximum number of virtqueues a device may expose. */
#define VHOST_MAX_NR_VIRTQUEUE 8
/* Maximum number of descriptors in a single virtqueue. */
#define VIRTQUEUE_MAX_SIZE 1024

/* Maximum number of memory regions in a SET_MEM_TABLE message. */
#define VHOST_MEMORY_MAX_NREGIONS 8
/* Origin of a SET_CONFIG request (live device vs. incoming migration). */
typedef enum VhostSetConfigType {
    VHOST_SET_CONFIG_TYPE_MASTER = 0,
    VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;
/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256
/* Bits negotiated via {GET,SET}_PROTOCOL_FEATURES. */
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,

    VHOST_USER_PROTOCOL_F_MAX
};

/* Mask of every protocol feature bit this library knows about. */
#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
/*
 * Message types of the vhost-user protocol (master -> slave direction).
 * NOTE(review): VHOST_USER_NONE and VHOST_USER_MAX were lost in this copy
 * and are restored from the vhost-user protocol specification.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;
/*
 * One guest memory region as described in a SET_MEM_TABLE message.
 * NOTE(review): memory_size and mmap_offset were lost in this copy and are
 * restored from the vhost-user protocol specification.
 */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* guest physical address of the region */
    uint64_t memory_size;       /* region size in bytes */
    uint64_t userspace_addr;    /* master (QEMU) virtual address */
    uint64_t mmap_offset;       /* offset into the mmap'd fd */
} VhostUserMemoryRegion;
94 typedef struct VhostUserMemory
{
97 VhostUserMemoryRegion regions
[VHOST_MEMORY_MAX_NREGIONS
];
/*
 * Payload of SET_LOG_BASE: dirty-log shared-memory mapping parameters.
 * NOTE(review): mmap_size was lost in this copy and is restored from the
 * vhost-user protocol specification.
 */
typedef struct VhostUserLog {
    uint64_t mmap_size;     /* size of the log mapping in bytes */
    uint64_t mmap_offset;   /* offset into the log fd */
} VhostUserLog;
105 typedef struct VhostUserConfig
{
109 uint8_t region
[VHOST_USER_MAX_CONFIG_SIZE
];
112 static VhostUserConfig c
__attribute__ ((unused
));
113 #define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
/*
 * Packed-struct attribute; on Windows gcc_struct is needed so the layout
 * matches the Linux master across the wire.
 * NOTE(review): the #if/#else/#endif lines were lost in this copy and are
 * restored to the conventional _WIN32 split.
 */
#if defined(_WIN32)
# define VU_PACKED __attribute__((gcc_struct, packed))
#else
# define VU_PACKED __attribute__((packed))
#endif
123 typedef struct VhostUserMsg
{
124 VhostUserRequest request
;
126 #define VHOST_USER_VERSION_MASK (0x3)
127 #define VHOST_USER_REPLY_MASK (0x1 << 2)
129 uint32_t size
; /* the following payload size */
132 #define VHOST_USER_VRING_IDX_MASK (0xff)
133 #define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
135 struct vhost_vring_state state
;
136 struct vhost_vring_addr addr
;
137 VhostUserMemory memory
;
139 VhostUserConfig config
;
142 int fds
[VHOST_MEMORY_MAX_NREGIONS
];
145 } VU_PACKED VhostUserMsg
;
/*
 * A guest memory region as tracked by the slave after SET_MEM_TABLE.
 * NOTE(review): only the comments and mmap_offset survived in this copy;
 * the remaining field names are restored from upstream libvhost-user —
 * confirm against the .c file.
 */
typedef struct VuDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmaped space. */
    uint64_t mmap_offset;
    /* Start address of mmaped space. */
    uint64_t mmap_addr;
} VuDevRegion;
160 typedef struct VuDev VuDev
;
162 typedef uint64_t (*vu_get_features_cb
) (VuDev
*dev
);
163 typedef void (*vu_set_features_cb
) (VuDev
*dev
, uint64_t features
);
164 typedef int (*vu_process_msg_cb
) (VuDev
*dev
, VhostUserMsg
*vmsg
,
166 typedef void (*vu_queue_set_started_cb
) (VuDev
*dev
, int qidx
, bool started
);
167 typedef bool (*vu_queue_is_processed_in_order_cb
) (VuDev
*dev
, int qidx
);
168 typedef int (*vu_get_config_cb
) (VuDev
*dev
, uint8_t *config
, uint32_t len
);
169 typedef int (*vu_set_config_cb
) (VuDev
*dev
, const uint8_t *data
,
170 uint32_t offset
, uint32_t size
,
173 typedef struct VuDevIface
{
174 /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
175 vu_get_features_cb get_features
;
176 /* enable vhost implementation features */
177 vu_set_features_cb set_features
;
178 /* get the protocol feature bitmask from the underlying vhost
180 vu_get_features_cb get_protocol_features
;
181 /* enable protocol features in the underlying vhost implementation. */
182 vu_set_features_cb set_protocol_features
;
183 /* process_msg is called for each vhost-user message received */
184 /* skip libvhost-user processing if return value != 0 */
185 vu_process_msg_cb process_msg
;
186 /* tells when queues can be processed */
187 vu_queue_set_started_cb queue_set_started
;
189 * If the queue is processed in order, in which case it will be
190 * resumed to vring.used->idx. This can help to support resuming
191 * on unmanaged exit/crash.
193 vu_queue_is_processed_in_order_cb queue_is_processed_in_order
;
194 /* get the config space of the device */
195 vu_get_config_cb get_config
;
196 /* set the config space of the device */
197 vu_set_config_cb set_config
;
200 typedef void (*vu_queue_handler_cb
) (VuDev
*dev
, int qidx
);
/*
 * The three split-virtqueue rings, translated to slave virtual addresses.
 * NOTE(review): the num and flags members were lost in this copy and are
 * restored from upstream libvhost-user.
 */
typedef struct VuRing {
    unsigned int num;           /* ring size (number of descriptors) */
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
    uint64_t log_guest_addr;    /* guest address used for dirty logging */
    uint32_t flags;
} VuRing;
211 typedef struct VuVirtq
{
214 /* Next head to pop */
215 uint16_t last_avail_idx
;
217 /* Last avail_idx read from VQ. */
218 uint16_t shadow_avail_idx
;
222 /* Last used index value we have signalled on */
223 uint16_t signalled_used
;
225 /* Last used index value we have signalled on */
226 bool signalled_used_valid
;
228 /* Notification enabled? */
233 vu_queue_handler_cb handler
;
/*
 * Poll conditions passed to the set_watch/watch callbacks; values alias
 * the corresponding <poll.h> event bits so they can be used directly.
 * (Name keeps the upstream spelling, typo included, for compatibility.)
 */
typedef enum VuWatchCondtion {
    VU_WATCH_IN = POLLIN,
    VU_WATCH_OUT = POLLOUT,
    VU_WATCH_PRI = POLLPRI,
    VU_WATCH_ERR = POLLERR,
    VU_WATCH_HUP = POLLHUP,
} VuWatchCondtion;
250 typedef void (*vu_panic_cb
) (VuDev
*dev
, const char *err
);
251 typedef void (*vu_watch_cb
) (VuDev
*dev
, int condition
, void *data
);
252 typedef void (*vu_set_watch_cb
) (VuDev
*dev
, int fd
, int condition
,
253 vu_watch_cb cb
, void *data
);
254 typedef void (*vu_remove_watch_cb
) (VuDev
*dev
, int fd
);
259 VuDevRegion regions
[VHOST_MEMORY_MAX_NREGIONS
];
260 VuVirtq vq
[VHOST_MAX_NR_VIRTQUEUE
];
266 uint64_t protocol_features
;
269 /* @set_watch: add or update the given fd to the watch set,
270 * call cb when condition is met */
271 vu_set_watch_cb set_watch
;
273 /* @remove_watch: remove the given fd from the watch set */
274 vu_remove_watch_cb remove_watch
;
276 /* @panic: encountered an unrecoverable error, you may try to
279 const VuDevIface
*iface
;
/*
 * A popped virtqueue element: descriptor chain split into writable (in)
 * and readable (out) iovecs.
 * NOTE(review): index, in_num and in_sg were lost in this copy and are
 * restored from upstream libvhost-user.
 */
typedef struct VuVirtqElement {
    unsigned int index;     /* head descriptor index, used for vu_queue_fill */
    unsigned int out_num;
    unsigned int in_num;
    struct iovec *in_sg;    /* device-writable buffers */
    struct iovec *out_sg;   /* device-readable buffers */
} VuVirtqElement;
292 * @dev: a VuDev context
293 * @socket: the socket connected to vhost-user master
294 * @panic: a panic callback
295 * @set_watch: a set_watch callback
296 * @remove_watch: a remove_watch callback
297 * @iface: a VuDevIface structure with vhost-user device callbacks
299 * Intializes a VuDev vhost-user context.
301 void vu_init(VuDev
*dev
,
304 vu_set_watch_cb set_watch
,
305 vu_remove_watch_cb remove_watch
,
306 const VuDevIface
*iface
);
311 * @dev: a VuDev context
313 * Cleans up the VuDev context
315 void vu_deinit(VuDev
*dev
);
319 * @dev: a VuDev context
321 * Process one vhost-user message.
323 * Returns: TRUE on success, FALSE on failure.
325 bool vu_dispatch(VuDev
*dev
);
329 * @dev: a VuDev context
330 * @guest_addr: guest address
332 * Translate a guest address to a pointer. Returns NULL on failure.
334 void *vu_gpa_to_va(VuDev
*dev
, uint64_t guest_addr
);
338 * @dev: a VuDev context
341 * Returns the queue number @qidx.
343 VuVirtq
*vu_get_queue(VuDev
*dev
, int qidx
);
346 * vu_set_queue_handler:
347 * @dev: a VuDev context
348 * @vq: a VuVirtq queue
349 * @handler: the queue handler callback
351 * Set the queue handler. This function may be called several times
352 * for the same queue. If called with NULL @handler, the handler is
355 void vu_set_queue_handler(VuDev
*dev
, VuVirtq
*vq
,
356 vu_queue_handler_cb handler
);
360 * vu_queue_set_notification:
361 * @dev: a VuDev context
362 * @vq: a VuVirtq queue
365 * Set whether the queue notifies (via event index or interrupt)
367 void vu_queue_set_notification(VuDev
*dev
, VuVirtq
*vq
, int enable
);
371 * @dev: a VuDev context
372 * @vq: a VuVirtq queue
374 * Returns: whether the queue is enabled.
376 bool vu_queue_enabled(VuDev
*dev
, VuVirtq
*vq
);
380 * @dev: a VuDev context
381 * @vq: a VuVirtq queue
383 * Returns: whether the queue is started.
385 bool vu_queue_started(const VuDev
*dev
, const VuVirtq
*vq
);
389 * @dev: a VuDev context
390 * @vq: a VuVirtq queue
392 * Returns: true if the queue is empty or not ready.
394 bool vu_queue_empty(VuDev
*dev
, VuVirtq
*vq
);
398 * @dev: a VuDev context
399 * @vq: a VuVirtq queue
401 * Request to notify the queue via callfd (skipped if unnecessary)
403 void vu_queue_notify(VuDev
*dev
, VuVirtq
*vq
);
407 * @dev: a VuDev context
408 * @vq: a VuVirtq queue
409 * @sz: the size of struct to return (must be >= VuVirtqElement)
411 * Returns: a VuVirtqElement filled from the queue or NULL. The
412 * returned element must be free()-d by the caller.
414 void *vu_queue_pop(VuDev
*dev
, VuVirtq
*vq
, size_t sz
);
418 * @dev: a VuDev context
419 * @vq: a VuVirtq queue
420 * @num: number of elements to push back
422 * Pretend that elements weren't popped from the virtqueue. The next
423 * virtqueue_pop() will refetch the oldest element.
425 * Returns: true on success, false if @num is greater than the number of in use
428 bool vu_queue_rewind(VuDev
*dev
, VuVirtq
*vq
, unsigned int num
);
432 * @dev: a VuDev context
433 * @vq: a VuVirtq queue
434 * @elem: a VuVirtqElement
435 * @len: length in bytes to write
436 * @idx: optional offset for the used ring index (0 in general)
438 * Fill the used ring with @elem element.
440 void vu_queue_fill(VuDev
*dev
, VuVirtq
*vq
,
441 const VuVirtqElement
*elem
,
442 unsigned int len
, unsigned int idx
);
446 * @dev: a VuDev context
447 * @vq: a VuVirtq queue
448 * @elem: a VuVirtqElement
449 * @len: length in bytes to write
451 * Helper that combines vu_queue_fill() with a vu_queue_flush().
453 void vu_queue_push(VuDev
*dev
, VuVirtq
*vq
,
454 const VuVirtqElement
*elem
, unsigned int len
);
458 * @dev: a VuDev context
459 * @vq: a VuVirtq queue
460 * @num: number of elements to flush
462 * Mark the last number of elements as done (used.idx is updated by
465 void vu_queue_flush(VuDev
*dev
, VuVirtq
*vq
, unsigned int num
);
468 * vu_queue_get_avail_bytes:
469 * @dev: a VuDev context
470 * @vq: a VuVirtq queue
471 * @in_bytes: in bytes
472 * @out_bytes: out bytes
473 * @max_in_bytes: stop counting after max_in_bytes
474 * @max_out_bytes: stop counting after max_out_bytes
476 * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
478 void vu_queue_get_avail_bytes(VuDev
*vdev
, VuVirtq
*vq
, unsigned int *in_bytes
,
479 unsigned int *out_bytes
,
480 unsigned max_in_bytes
, unsigned max_out_bytes
);
483 * vu_queue_avail_bytes:
484 * @dev: a VuDev context
485 * @vq: a VuVirtq queue
486 * @in_bytes: expected in bytes
487 * @out_bytes: expected out bytes
489 * Returns: true if in_bytes <= in_total && out_bytes <= out_total
491 bool vu_queue_avail_bytes(VuDev
*dev
, VuVirtq
*vq
, unsigned int in_bytes
,
492 unsigned int out_bytes
);
494 #endif /* LIBVHOST_USER_H */