 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */
/* this code avoids GLib dependency */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>

#include <linux/vhost.h>

#include "qemu/compiler.h"
#include "qemu/atomic.h"

#include "libvhost-user.h"
/* usually provided by GLib */
#define MIN(x, y) ({                            \
            typeof(x) _min1 = (x);              \
            typeof(y) _min2 = (y);              \
            (void) (&_min1 == &_min2);          \
            _min1 < _min2 ? _min1 : _min2; })
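/*
 * Note on MIN() above: as a GNU statement expression it evaluates each
 * argument exactly once, unlike the naive ((x) < (y) ? (x) : (y)) form,
 * and the otherwise useless (void) (&_min1 == &_min2) comparison makes
 * the compiler emit a "comparison of distinct pointer types" warning
 * whenever the two arguments have different types (for example an int
 * mixed with a size_t).
 */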
#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)
static const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}
static void
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /* FIXME: find a way to call virtio_error? */
}
/* Translate guest physical address to our virtual address.  */
void *
vu_gpa_to_va(VuDev *dev, uint64_t guest_addr)
{
    int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
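/*
 * Illustration (numbers are made up): for a region with
 * gpa = 0x40000000, size = 0x10000000, mmap_addr = 0x7f0000000000 and
 * mmap_offset = 0, the guest physical address 0x40001000 maps to the
 * process-local pointer 0x7f0000001000, i.e.
 * guest_addr - gpa + mmap_addr + mmap_offset.
 */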
/* Translate qemu virtual address to our virtual address.  */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}

static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}
static bool
vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert(rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}
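/*
 * On the wire, the master sends a fixed-size header first (the request,
 * flags and size fields, i.e. VHOST_USER_HDR_SIZE bytes), with any
 * passed file descriptors attached as SCM_RIGHTS ancillary data, and the
 * variable-size payload follows on the same socket.  vu_message_read()
 * above mirrors that: one recvmsg() for the header plus fds, then a
 * plain read() for vmsg->size payload bytes.
 */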
static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;

    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    do {
        rc = write(conn_fd, p, VHOST_USER_HDR_SIZE);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    do {
        if (vmsg->data) {
            rc = write(conn_fd, vmsg->data, vmsg->size);
        } else {
            rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
        }
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}
/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}

static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    atomic_or(&log_table[page / 8], 1 << (page % 8));
}
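/*
 * The dirty log is a plain bitmap with one bit per VHOST_LOG_PAGE-sized
 * page of guest memory: page N lives in byte N / 8 of the table, at bit
 * N % 8.  QEMU scans this bitmap during live migration to find pages the
 * device has written behind its back and therefore must be re-sent.
 */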
static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    /* Mark every log page touched by the [address, address + length) range. */
    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}
static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}
static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}

static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    int i;

    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
        dev->vq[i].enable = enabled;
    }
}

static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;

    if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}
static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}

static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}

static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}
static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory *memory = &vmsg->payload.memory;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.  */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        close(vmsg->fds[i]);
    }

    return false;
}
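/*
 * Note that each region is mapped with a length of size + mmap_offset
 * and an mmap() file offset of 0, so the usable guest memory starts at
 * mmap_addr + mmap_offset inside the mapping; that is exactly the bias
 * vu_gpa_to_va() and qva_to_va() apply when translating addresses.
 */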
static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);

    return true;
}
static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].vring.num = num;

    return false;
}
static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr *vra = &vmsg->payload.addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016llx\n", vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016llx\n", vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016llx\n", vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016llx\n", vra->log_guest_addr);

    vq->vring.flags = vra->flags;
    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
    vq->vring.log_guest_addr = vra->log_guest_addr;

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = vq->vring.used->idx;

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
    }

    return false;
}
static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}

static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %d\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}
static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
        vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}
static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
        dev->vq[index].kick_fd = vmsg->fds[0];
        DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
    }

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    return false;
}
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
        dev->vq[index].call_fd = vmsg->fds[0];
    }

    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);

    return false;
}

static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
        dev->vq[index].err_fd = vmsg->fds[0];
    }

    return false;
}
static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ;

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg->payload.u64 = features;
    vmsg->size = sizeof(vmsg->payload.u64);

    return true;
}

static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}
static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("Function %s() not implemented yet.\n", __func__);
    return false;
}

static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %d\n", index);
    DPRINT("State.enable: %d\n", enable);

    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}
static bool
vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
        return false;
    }

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
    }
    dev->slave_fd = vmsg->fds[0];
    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to master */
        vmsg->size = 0;
    }

    return true;
}

static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}
static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %d\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_SLAVE_REQ_FD:
        return vu_set_slave_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        break;
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}
bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool success = false;

    if (!vu_message_read(dev, dev->sock, &vmsg)) {
        goto end;
    }

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_message_write(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}
void
vu_deinit(VuDev *dev)
{
    int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;
        if (m != MAP_FAILED) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = 0;

    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }
    }

    vu_close_log(dev);
    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
        dev->slave_fd = -1;
    }

    if (dev->sock != -1) {
        close(dev->sock);
    }
}
void
vu_init(VuDev *dev,
        int socket,
        vu_panic_cb panic,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    int i;

    assert(socket >= 0);
    assert(remove_watch);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    dev->slave_fd = -1;
    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }
}
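/*
 * Typical back-end setup (illustrative sketch, not part of this file):
 * the application accepts the vhost-user socket, calls vu_init() with
 * its own panic/watch callbacks and a VuDevIface, then calls
 * vu_dispatch() whenever the socket becomes readable, e.g.:
 *
 *     VuDev dev;
 *     vu_init(&dev, conn_fd, my_panic, my_set_watch, my_remove_watch,
 *             &my_iface);
 *     while (vu_dispatch(&dev)) {
 *         // one vhost-user message handled per iteration
 *     }
 *     vu_deinit(&dev);
 *
 * where my_panic, my_set_watch, my_remove_watch and my_iface are
 * application-provided names; the loop simply processes messages until
 * one fails.
 */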
VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return vq->vring.avail->flags;
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = vq->vring.avail->idx;

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return vq->vring.avail->ring[i];
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
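/*
 * With VIRTIO_RING_F_EVENT_IDX the guest publishes its "used event"
 * index in the slot just past the avail ring (ring[num], read above by
 * vring_get_used_event()), and the device publishes its "avail event"
 * in the slot just past the used ring (written by vring_set_avail_event()
 * further down).  Each side only signals the other when the respective
 * index moves past that published value.
 */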
static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc[i].next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, num_bufs, indirect = 0;
        struct vring_desc *desc;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
            if (desc[i].len % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc[i].len / sizeof(struct vring_desc);
            desc = vu_gpa_to_va(dev, desc[i].addr);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (desc[i].flags & VRING_DESC_F_WRITE) {
                in_total += desc[i].len;
            } else {
                out_total += desc[i].len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    return !!(features & (1ULL << fbit));
}

bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}
static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
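/*
 * In other words: without EVENT_IDX the guest suppresses interrupts with
 * the VRING_AVAIL_F_NO_INTERRUPT flag; with EVENT_IDX the device only
 * signals when the used index has moved past the guest's published
 * used_event value since the last notification (that is what
 * vring_need_event(used_event, new, old) checks), or when it has no
 * valid record of what it signalled last.
 */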
void
vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}
static void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags |= mask;
}

static void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags &= ~mask;
}

static void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    if (!vq->notification) {
        return;
    }

    *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
}
void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
static void
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return;
    }

    iov[num_sg].iov_base = vu_gpa_to_va(dev, pa);
    iov[num_sg].iov_len = sz;
    num_sg++;

    *p_num_sg = num_sg;
}

/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
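/*
 * Example: ALIGN_DOWN(5, 4) is 5 / 4 * 4 = 4, and ALIGN_UP(5, 4) is
 * ALIGN_DOWN(5 + 3, 4) = 8; values that are already multiples are left
 * unchanged (ALIGN_UP(8, 4) = 8).  Used below to place the in_sg/out_sg
 * arrays at properly aligned offsets inside a single allocation.
 */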
static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;

    return elem;
}
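/*
 * The element and its scatter-gather arrays live in one malloc()ed block
 * laid out as
 *
 *     [ VuVirtqElement (sz bytes) | in_sg[in_num] | out_sg[out_num] ]
 *
 * with in_sg aligned via ALIGN_UP above, so a single free() in the
 * caller releases everything that vu_queue_pop() returned.
 */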
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    unsigned int i, head, max;
    VuVirtqElement *elem;
    unsigned out_num, in_num;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc *desc;
    int rc;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;
    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;
    desc = vq->vring.desc;
    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
        }

        /* loop over the indirect descriptor table */
        max = desc[i].len / sizeof(struct vring_desc);
        desc = vu_gpa_to_va(dev, desc[i].addr);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (desc[i].flags & VRING_DESC_F_WRITE) {
            virtqueue_map_desc(dev, &in_num, iov + out_num,
                               VIRTQUEUE_MAX_SIZE - out_num, true,
                               desc[i].addr, desc[i].len);
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            virtqueue_map_desc(dev, &out_num, iov,
                               VIRTQUEUE_MAX_SIZE, false,
                               desc[i].addr, desc[i].len);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    return elem;
}
bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}
static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min;
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
        }

        /* loop over the indirect descriptor table */
        max = desc[i].len / sizeof(struct vring_desc);
        desc = vu_gpa_to_va(dev, desc[i].addr);
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
        }

        if (desc[i].flags & VRING_DESC_F_WRITE) {
            min = MIN(desc[i].len, len);
            vu_log_write(dev, desc[i].addr, min);
            len -= min;
        }

    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}
void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(dev, vq, &uelem, idx);
}
static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = val;
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}
void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_flush(dev, vq, 1);
}
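/*
 * Typical per-queue processing driven from a queue handler (illustrative
 * sketch; names other than the vu_* API are application-side):
 *
 *     static void my_queue_handler(VuDev *dev, int qidx)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             // read the request from elem->out_sg[],
 *             // write the response into elem->in_sg[]
 *             vu_queue_push(dev, vq, elem, bytes_written);
 *             free(elem);
 *         }
 *         vu_queue_notify(dev, vq);
 *     }
 *
 * where bytes_written is however much of the in_sg buffers the device
 * actually filled; vu_queue_push() adds the element to the used ring and
 * vu_queue_notify() signals the guest once at the end.
 */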