/*
 * Vhost User library
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

/* this code avoids GLib dependency */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <endian.h>

/* Necessary to provide VIRTIO_F_VERSION_1 on system
 * with older linux headers. Must appear before
 * <linux/vhost.h> below.
 */
#include "standard-headers/linux/virtio_config.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <sys/vfs.h>
#include <linux/magic.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "include/atomic.h"

#include "libvhost-user.h"

/* usually provided by GLib */
#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
#if !defined(__clang__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(gnu_printf, format_idx, arg_idx)))
#else
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(__printf__, format_idx, arg_idx)))
#endif
#else /* !__GNUC__ */
#define G_GNUC_PRINTF(format_idx, arg_idx)
#endif /* !__GNUC__ */

#ifndef MIN
#define MIN(x, y) ({                            \
            __typeof__(x) _min1 = (x);          \
            __typeof__(y) _min2 = (y);          \
            (void) (&_min1 == &_min2);          \
            _min1 < _min2 ? _min1 : _min2; })
#endif

/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
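
/*
 * Example: ALIGN_DOWN(0x1234, 0x1000) == 0x1000 and
 * ALIGN_UP(0x1234, 0x1000) == 0x2000. Integer division makes both
 * correct for any non-zero multiple, not just powers of two.
 */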

#ifndef likely
#define likely(x)     __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x)   __builtin_expect(!!(x), 0)
#endif

/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)

static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    assert(fbit < 64);
    return !!(features & (1ULL << fbit));
}

static inline
bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}

static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}

const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_BACKEND_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_VRING_KICK),
        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
        REQ(VHOST_USER_ADD_MEM_REG),
        REQ(VHOST_USER_REM_MEM_REG),
        REQ(VHOST_USER_GET_SHARED_OBJECT),
        REQ(VHOST_USER_MAX),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}

static void G_GNUC_PRINTF(2, 3)
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /*
     * FIXME:
     * find a way to call virtio_error, or perhaps close the connection?
     */
}

/* Search for a memory region that covers this guest physical address. */
static VuDevRegion *
vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr)
{
    int low = 0;
    int high = dev->nregions - 1;

    /*
     * Memory regions cannot overlap in guest physical address space. Each
     * GPA belongs to exactly one memory region, so there can only be one
     * match.
     *
     * We store our memory regions ordered by GPA and can simply perform a
     * binary search.
     */
    while (low <= high) {
        unsigned int mid = low + (high - low) / 2;
        VuDevRegion *cur = &dev->regions[mid];

        if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) {
            return cur;
        }
        if (guest_addr >= cur->gpa + cur->size) {
            low = mid + 1;
        }
        if (guest_addr < cur->gpa) {
            high = mid - 1;
        }
    }
    return NULL;
}

/* Translate guest physical address to our virtual address.  */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    VuDevRegion *r;

    if (*plen == 0) {
        return NULL;
    }

    r = vu_gpa_to_mem_region(dev, guest_addr);
    if (!r) {
        return NULL;
    }

    if ((guest_addr + *plen) > (r->gpa + r->size)) {
        *plen = r->gpa + r->size - guest_addr;
    }
    return (void *)(uintptr_t)guest_addr - r->gpa + r->mmap_addr +
           r->mmap_offset;
}

/* Translate qemu virtual address to our virtual address.  */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    unsigned int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}

static void
vu_remove_all_mem_regs(VuDev *dev)
{
    unsigned int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);
    }
    dev->nregions = 0;
}
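
/*
 * Note on the return convention: map_ring() returns true on *failure*,
 * i.e. when any of the desc/used/avail addresses could not be translated,
 * which is why callers write "if (map_ring(dev, vq)) { vu_panic(...); }".
 */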
static bool
map_ring(VuDev *dev, VuVirtq *vq)
{
    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
}

static bool
vu_is_vq_usable(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken)) {
        return false;
    }

    if (likely(vq->vring.avail)) {
        return true;
    }

    /*
     * In corner cases, we might temporarily remove a memory region that
     * mapped a ring. When removing a memory region we make sure to
     * unmap any rings that would be impacted. Let's try to remap if we
     * already succeeded mapping this ring once.
     */
    if (!vq->vra.desc_user_addr || !vq->vra.used_user_addr ||
        !vq->vra.avail_user_addr) {
        return false;
    }
    if (map_ring(dev, vq)) {
        vu_panic(dev, "remapping queue on access");
        return false;
    }
    return true;
}

static void
unmap_rings(VuDev *dev, VuDevRegion *r)
{
    int i;

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];
        const uintptr_t desc = (uintptr_t)vq->vring.desc;
        const uintptr_t used = (uintptr_t)vq->vring.used;
        const uintptr_t avail = (uintptr_t)vq->vring.avail;

        if (desc < r->mmap_addr || desc >= r->mmap_addr + r->size) {
            continue;
        }
        if (used < r->mmap_addr || used >= r->mmap_addr + r->size) {
            continue;
        }
        if (avail < r->mmap_addr || avail >= r->mmap_addr + r->size) {
            continue;
        }

        DPRINT("Unmapping rings of queue %d\n", i);
        vq->vring.desc = NULL;
        vq->vring.used = NULL;
        vq->vring.avail = NULL;
    }
}

static size_t
get_fd_hugepagesize(int fd)
{
#if defined(__linux__)
    struct statfs fs;
    int ret;

    do {
        ret = fstatfs(fd, &fs);
    } while (ret != 0 && errno == EINTR);

    if (!ret && (unsigned int)fs.f_type == HUGETLBFS_MAGIC) {
        return fs.f_bsize;
    }
#endif
    return 0;
}

static void
_vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
{
    const uint64_t start_gpa = msg_region->guest_phys_addr;
    const uint64_t end_gpa = start_gpa + msg_region->memory_size;
    int prot = PROT_READ | PROT_WRITE;
    uint64_t mmap_offset, fd_offset;
    size_t hugepagesize;
    VuDevRegion *r;
    void *mmap_addr;
    int low = 0;
    int high = dev->nregions - 1;
    unsigned int idx;

    DPRINT("Adding region %d\n", dev->nregions);
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr:  0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    old mmap_offset: 0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    if (dev->postcopy_listening) {
        /*
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault
         */
        prot = PROT_NONE;
    }

    /*
     * We will add memory regions into the array sorted by GPA. Perform a
     * binary search to locate the insertion point: it will be at the low
     * index.
     */
    while (low <= high) {
        unsigned int mid = low + (high - low) / 2;
        VuDevRegion *cur = &dev->regions[mid];

        /* Overlap of GPA addresses. */
        if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) {
            vu_panic(dev, "regions with overlapping guest physical addresses");
            return;
        }
        if (start_gpa >= cur->gpa + cur->size) {
            low = mid + 1;
        }
        if (start_gpa < cur->gpa) {
            high = mid - 1;
        }
    }
    idx = low;

    /*
     * Convert most of msg_region->mmap_offset to fd_offset. In almost all
     * cases, this will leave us with mmap_offset == 0, mmap()'ing only
     * what we really need. Only if a memory region would partially cover
     * hugetlb pages, we'd get mmap_offset != 0, which usually doesn't happen
     * anymore (i.e., modern QEMU).
     *
     * Note that mmap() with hugetlb would fail if the offset into the file
     * is not aligned to the huge page size.
     */
    hugepagesize = get_fd_hugepagesize(fd);
    if (hugepagesize) {
        fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize);
        mmap_offset = msg_region->mmap_offset - fd_offset;
    } else {
        fd_offset = msg_region->mmap_offset;
        mmap_offset = 0;
    }

    DPRINT("    fd_offset:       0x%016"PRIx64"\n",
           fd_offset);
    DPRINT("    new mmap_offset: 0x%016"PRIx64"\n",
           mmap_offset);

    mmap_addr = mmap(0, msg_region->memory_size + mmap_offset,
                     prot, MAP_SHARED | MAP_NORESERVE, fd, fd_offset);
    if (mmap_addr == MAP_FAILED) {
        vu_panic(dev, "region mmap error: %s", strerror(errno));
        return;
    }
    DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
           (uint64_t)(uintptr_t)mmap_addr);

#if defined(__linux__)
    /* Don't include all guest memory in a coredump. */
    madvise(mmap_addr, msg_region->memory_size + mmap_offset,
            MADV_DONTDUMP);
#endif

    /* Shift all affected entries by 1 to open a hole at idx. */
    r = &dev->regions[idx];
    memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx));
    r->gpa = msg_region->guest_phys_addr;
    r->size = msg_region->memory_size;
    r->qva = msg_region->userspace_addr;
    r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
    r->mmap_offset = mmap_offset;
    dev->nregions++;

    if (dev->postcopy_listening) {
        /*
         * Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = r->mmap_addr + r->mmap_offset;
    }
}

static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}

/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}

/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;

#else
    return false;
#endif
}
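
/*
 * File descriptors ride along with the message header as SCM_RIGHTS
 * ancillary data on the first recvmsg(); the payload that follows is
 * read with a plain read(). Both loops retry on EINTR/EAGAIN.
 */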
static bool
vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            /* Compare fd counts, not bytes against a count. */
            assert(fd_size / sizeof(int) <= VHOST_MEMORY_BASELINE_NREGIONS);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert((uint32_t)rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}

static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}

static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}

/*
 * Processes a reply on the backend channel.
 * Entered with backend_mutex held and releases it before exit.
 * Returns true on success.
 */
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;
    bool result = false;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        result = true;
        goto out;
    }

    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        goto out;
    }

    result = msg_reply.payload.u64 == 0;

out:
    pthread_mutex_unlock(&dev->backend_mutex);
    return result;
}

/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}
static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    qatomic_or(&log_table[page / 8], 1 << (page % 8));
}

static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}

static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}

static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        /*
         * The following VIRTIO feature bits are supported by our virtqueue
         * implementation:
         */
        1ULL << VIRTIO_F_NOTIFY_ON_EMPTY |
        1ULL << VIRTIO_RING_F_INDIRECT_DESC |
        1ULL << VIRTIO_RING_F_EVENT_IDX |
        1ULL << VIRTIO_F_VERSION_1 |

        /* vhost-user feature bits */
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}

static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}

static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;
    if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
        /*
         * We only support devices conforming to VIRTIO 1.0 or
         * later
         */
        vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
        return false;
    }

    /*
     * Test the feature *bit*; the previous "dev->features &
     * VHOST_USER_F_PROTOCOL_FEATURES" used the bit number as a mask.
     */
    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}

static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}

static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}

static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}

static bool
generate_faults(VuDev *dev) {
    unsigned int i;
    for (i = 0; i < dev->nregions; i++) {
#ifdef UFFDIO_REGISTER
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
        struct uffdio_register reg_struct;

        /*
         * We should already have an open ufd. Mark each memory
         * range as ufd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /*
         * Turn off transparent hugepages so we don't get lost wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /*
             * Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }

        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64
                          ": (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return true;
}

static bool
vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        close(vmsg->fds[0]);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) {
        close(vmsg->fds[0]);
        vu_panic(dev, "failing attempt to hot add memory via "
                      "VHOST_USER_ADD_MEM_REG message because the backend has "
                      "no free ram slots available");
        return false;
    }

    /*
     * If we are in postcopy mode and we receive a u64 payload with a 0 value
     * we know all the postcopy client bases have been received, and we
     * should start generating faults.
     */
    if (dev->postcopy_listening &&
        vmsg->size == sizeof(vmsg->payload.u64) &&
        vmsg->payload.u64 == 0) {
        (void)generate_faults(dev);
        return false;
    }

    _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]);
    close(vmsg->fds[0]);

    if (dev->postcopy_listening) {
        /* Send the message back to qemu with the addresses filled in. */
        vmsg->fd_num = 0;
        DPRINT("Successfully added new region in postcopy\n");
        return true;
    }
    DPRINT("Successfully added new region\n");
    return false;
}

static inline bool reg_equal(VuDevRegion *vudev_reg,
                             VhostUserMemoryRegion *msg_reg)
{
    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
        vudev_reg->qva == msg_reg->userspace_addr &&
        vudev_reg->size == msg_reg->memory_size) {
        return true;
    }

    return false;
}

static bool
vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    unsigned int idx;
    VuDevRegion *r;

    if (vmsg->fd_num > 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    DPRINT("Removing region:\n");
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr:  0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset:     0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr);
    if (!r || !reg_equal(r, msg_region)) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Specified region not found\n");
        return false;
    }

    /*
     * There might be valid cases where we temporarily remove memory regions
     * to re-add them, or remove memory regions and don't use the rings
     * anymore before we set the ring addresses and restart the device.
     *
     * Unmap all affected rings, remapping them on demand later. This should
     * be a corner case.
     */
    unmap_rings(dev, r);

    munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);

    idx = r - dev->regions;
    assert(idx < dev->nregions);
    /* Shift all affected entries by 1 to close the hole. */
    memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1));
    DPRINT("Successfully removed a region\n");
    dev->nregions--;

    vmsg_close_fds(vmsg);

    return false;
}

static bool
vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd_num = 0;
    int dmabuf_fd = -1;
    if (dev->iface->get_shared_object) {
        dmabuf_fd = dev->iface->get_shared_object(
            dev, &vmsg->payload.object.uuid[0]);
    }
    if (dmabuf_fd != -1) {
        DPRINT("dmabuf_fd found for requested UUID\n");
        vmsg->fds[fd_num++] = dmabuf_fd;
    }
    vmsg->fd_num = fd_num;

    return true;
}

static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    unsigned int i;

    vu_remove_all_mem_regs(dev);

    DPRINT("Nregions: %u\n", memory->nregions);
    for (i = 0; i < memory->nregions; i++) {
        _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]);
        close(vmsg->fds[i]);
    }

    if (dev->postcopy_listening) {
        /* Send the message back to qemu with the addresses filled in */
        vmsg->fd_num = 0;
        if (!vu_send_reply(dev, dev->sock, vmsg)) {
            vu_panic(dev, "failed to respond to set-mem-table for postcopy");
            return false;
        }

        /*
         * Wait for QEMU to confirm that it's registered the handler for the
         * faults.
         */
        if (!dev->read_msg(dev, dev->sock, vmsg) ||
            vmsg->size != sizeof(vmsg->payload.u64) ||
            vmsg->payload.u64 != 0) {
            vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
            return false;
        }

        /* OK, now we can go and register the memory and generate faults */
        (void)generate_faults(dev);
        return false;
    }

    for (i = 0; i < dev->max_queues; i++) {
        if (dev->vq[i].vring.desc) {
            if (map_ring(dev, &dev->vq[i])) {
                vu_panic(dev, "remapping queue %d during setmemtable", i);
            }
        }
    }

    return false;
}

static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}

static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].vring.num = num;

    return false;
}

static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr);

    vq->vra = *vra;
    vq->vring.flags = vra->flags;
    vq->vring.log_guest_addr = vra->log_guest_addr;

    if (map_ring(dev, vq)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        if (resume) {
            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
        }
    }

    return false;
}

static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}

static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %u\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}

static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (nofd) {
        vmsg_close_fds(vmsg);
        return true;
    }

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}
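
/*
 * qsort() comparator for resubmit_list: the entry with the larger
 * submission counter sorts first, and the VIRTQUEUE_MAX_SIZE * 2 window
 * bounds the comparison so a counter wrap does not invert the order.
 */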
static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}

static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;
    vq->counter = 0;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;

        barrier();

        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
        if (!vq->resubmit_list) {
            return -1;
        }

        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}

static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}

void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}
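
/*
 * Advertise a per-queue "host notifier" memory area to the frontend over
 * the backend channel; with VHOST_USER_PROTOCOL_F_HOST_NOTIFIER the
 * frontend can map it so that guest queue notifications land there
 * instead of going through the kick eventfd. NEED_REPLY is set, so
 * vu_process_message_reply() waits for the frontend's ack.
 */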
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) {
        return false;
    }

    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
        pthread_mutex_unlock(&dev->backend_mutex);
        return false;
    }

    /* Also unlocks the backend_mutex */
    return vu_process_message_reply(dev, &vmsg);
}

bool
vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
                        int *dmabuf_fd)
{
    bool result = false;
    VhostUserMsg msg_reply;
    VhostUserMsg msg = {
        .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP,
        .size = sizeof(msg.payload.object),
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };

    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
        return false;
    }

    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, &msg)) {
        goto out;
    }

    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != msg.request) {
        DPRINT("Received unexpected msg type. Expected %d, received %d",
               msg.request, msg_reply.request);
        goto out;
    }

    if (msg_reply.fd_num != 1) {
        DPRINT("Received unexpected number of fds. Expected 1, received %d",
               msg_reply.fd_num);
        goto out;
    }

    *dmabuf_fd = msg_reply.fds[0];
    result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0;
out:
    pthread_mutex_unlock(&dev->backend_mutex);

    return result;
}

static bool
vu_send_message(VuDev *dev, VhostUserMsg *vmsg)
{
    bool result = false;
    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, vmsg)) {
        goto out;
    }

    result = true;
out:
    pthread_mutex_unlock(&dev->backend_mutex);

    return result;
}

bool
vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
{
    VhostUserMsg msg = {
        .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD,
        .size = sizeof(msg.payload.object),
        .flags = VHOST_USER_VERSION,
    };

    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
        return false;
    }

    return vu_send_message(dev, &msg);
}

bool
vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
{
    VhostUserMsg msg = {
        .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE,
        .size = sizeof(msg.payload.object),
        .flags = VHOST_USER_VERSION,
    };

    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
        return false;
    }

    return vu_send_message(dev, &msg);
}

static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
        /* was "return -1", which a bool return coerces to true */
        return false;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);

    return false;
}

static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];

    return false;
}

static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Note that we support, but intentionally do not set,
     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
     * a device implementation can return it in its callback
     * (get_protocol_features) if it wants to use this for
     * simulation, but it is otherwise not desirable (if even
     * implemented by the frontend.)
     */
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD |
                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
                        1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}

static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) ||
         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        /*
         * The use case for using messages for kick/call is simulation, to make
         * the kick and call synchronous. To actually get that behaviour, both
         * of the other features are required.
         * Theoretically, one could use only kick messages, or do them without
         * having F_REPLY_ACK, but too many (possibly pending) messages on the
         * socket will eventually cause the frontend to hang. To avoid this in
         * scenarios where it is not desired, enforce settings that actually
         * enable the simulation case.
         */
        vu_panic(dev,
                 "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK");
        return false;
    }

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}

static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, dev->max_queues);
    return true;
}

static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %u\n", index);
    DPRINT("State.enable: %u\n", enable);

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}

static bool
vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num);
        return false;
    }

    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
    }
    dev->backend_fd = vmsg->fds[0];
    DPRINT("Got backend_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to frontend */
        vmsg->size = 0;
    }

    return true;
}

static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}

static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    vmsg->size = 0;
#else
    dev->postcopy_ufd = -1;
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}

static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        vmsg_set_reply_u64(vmsg, -1);
        return true;
    }
    dev->postcopy_listening = true;

    vmsg_set_reply_u64(vmsg, 0);
    return true;
}

static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg_set_reply_u64(vmsg, 0);
    DPRINT("%s: exit\n", __func__);
    return true;
}
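
/*
 * Per-queue size of the inflight tracking area. For example, assuming
 * the 16-byte VuDescStateSplit, queue_size 128 gives
 * ALIGN_UP(16 * 128 + 2, 64) = 2112 bytes.
 */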
static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}
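
/*
 * Allocate a shareable buffer backed by an anonymous memfd: create the
 * fd, size it with ftruncate(), apply the requested F_SEAL_* flags, and
 * map it shared. The caller gets both the mapping and the fd (to pass
 * to the frontend); NULL is returned on any failure.
 */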
#ifdef MFD_ALLOW_SEALING
static void *
memfd_alloc(const char *name, size_t size, unsigned int flags, int *fd)
{
    void *ptr;
    int ret;

    *fd = memfd_create(name, MFD_ALLOW_SEALING);
    if (*fd < 0) {
        return NULL;
    }

    ret = ftruncate(*fd, size);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ret = fcntl(*fd, F_ADD_SEALS, flags);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
    if (ptr == MAP_FAILED) {
        close(*fd);
        return NULL;
    }

    return ptr;
}
#endif

static bool
vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd = -1;
    void *addr = NULL;
    uint64_t mmap_size;
    uint16_t num_queues, queue_size;

    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;

#ifdef MFD_ALLOW_SEALING
    addr = memfd_alloc("vhost-inflight", mmap_size,
                       F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                       &fd);
#else
    vu_panic(dev, "Not implemented: memfd support is missing");
#endif

    if (!addr) {
        vu_panic(dev, "Failed to alloc vhost inflight area");
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    memset(addr, 0, mmap_size);

    dev->inflight_info.addr = addr;
    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
    dev->inflight_info.fd = vmsg->fds[0] = fd;
    vmsg->fd_num = 1;
    vmsg->payload.inflight.mmap_offset = 0;

    DPRINT("send inflight mmap_size: %"PRId64"\n",
           vmsg->payload.inflight.mmap_size);
    DPRINT("send inflight mmap offset: %"PRId64"\n",
           vmsg->payload.inflight.mmap_offset);

    return true;
}

static bool
vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd, i;
    uint64_t mmap_size, mmap_offset;
    uint16_t num_queues, queue_size;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
                 vmsg->size, vmsg->fd_num);
        return false;
    }

    fd = vmsg->fds[0];
    mmap_size = vmsg->payload.inflight.mmap_size;
    mmap_offset = vmsg->payload.inflight.mmap_offset;
    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
              fd, mmap_offset);

    if (rc == MAP_FAILED) {
        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
        return false;
    }

    if (dev->inflight_info.fd) {
        close(dev->inflight_info.fd);
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
    }

    dev->inflight_info.fd = fd;
    dev->inflight_info.addr = rc;
    dev->inflight_info.size = mmap_size;

    for (i = 0; i < num_queues; i++) {
        dev->vq[i].inflight = (VuVirtqInflight *)rc;
        dev->vq[i].inflight->desc_num = queue_size;
        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
    }

    return false;
}

static bool
vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    DPRINT("Got kick message: handler:%p idx:%u\n",
           dev->vq[index].handler, index);

    if (!dev->vq[index].started) {
        dev->vq[index].started = true;

        if (dev->iface->queue_set_started) {
            dev->iface->queue_set_started(dev, index, true);
        }
    }

    if (dev->vq[index].handler) {
        dev->vq[index].handler(dev, index);
    }

    return false;
}

static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS);

    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);

    return true;
}

static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %u\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_BACKEND_REQ_FD:
        return vu_set_backend_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        /* if you need processing before exit, override iface->process_msg */
        exit(0);
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    case VHOST_USER_GET_INFLIGHT_FD:
        return vu_get_inflight_fd(dev, vmsg);
    case VHOST_USER_SET_INFLIGHT_FD:
        return vu_set_inflight_fd(dev, vmsg);
    case VHOST_USER_VRING_KICK:
        return vu_handle_vring_kick(dev, vmsg);
    case VHOST_USER_GET_MAX_MEM_SLOTS:
        return vu_handle_get_max_memslots(dev, vmsg);
    case VHOST_USER_ADD_MEM_REG:
        return vu_add_mem_reg(dev, vmsg);
    case VHOST_USER_REM_MEM_REG:
        return vu_rem_mem_reg(dev, vmsg);
    case VHOST_USER_GET_SHARED_OBJECT:
        return vu_get_shared_object(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}
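
/*
 * Read and handle one message from the vhost-user socket. The handlers
 * above return true when a reply has been prepared in vmsg; if the
 * frontend set VHOST_USER_NEED_REPLY_MASK and no handler-specific reply
 * exists, a default u64=0 ack is sent instead (REPLY_ACK semantics).
 */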
bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool need_reply, success = false;

    if (!dev->read_msg(dev, dev->sock, &vmsg)) {
        goto end;
    }

    need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested && need_reply) {
        vmsg_set_reply_u64(&vmsg, 0);
        reply_requested = 1;
    }

    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}
void
vu_deinit(VuDev *dev)
{
    unsigned int i;

    vu_remove_all_mem_regs(dev);

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            dev->remove_watch(dev, vq->kick_fd);
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
        dev->backend_fd = -1;
    }
    pthread_mutex_destroy(&dev->backend_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
    free(dev->regions);
    dev->regions = NULL;
}

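/*
 * Illustrative sketch (not part of the library): a backend typically
 * initialises the device right after accepting the vhost-user socket.
 * The my_* names below are application-provided placeholders:
 *
 *     if (!vu_init(&dev, 1, conn_fd, my_panic, NULL,
 *                  my_set_watch, my_remove_watch, &my_iface)) {
 *         // allocation failed
 *     }
 *
 * Passing NULL for read_msg selects the built-in
 * vu_message_read_default().
 */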
bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_read_msg_cb read_msg,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(set_watch);
    assert(remove_watch);
    assert(iface);
    assert(panic);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->read_msg = read_msg ? read_msg : vu_message_read_default;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->backend_mutex, NULL);
    dev->backend_fd = -1;
    dev->max_queues = max_queues;

    dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0]));
    if (!dev->regions) {
        DPRINT("%s: failed to malloc mem regions\n", __func__);
        return false;
    }

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        free(dev->regions);
        dev->regions = NULL;
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}

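/* qidx must be below the max_queues value passed to vu_init(). */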
VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}

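/*
 * Split-ring accessors. All vring fields are stored little-endian,
 * hence the le16toh() conversions in the helpers below.
 */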
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return le16toh(vq->vring.avail->flags);
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return le16toh(vq->vring.avail->ring[i]);
}

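/*
 * With VIRTIO_RING_F_EVENT_IDX, the driver publishes used_event in the
 * slot right after the available ring entries, i.e. avail->ring[num].
 */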
static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}

static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    if (len == 0) {
        return -1;
    }

    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        /* read_len is in bytes, so advance the destination by bytes as
         * well, not by whole descriptors: a region boundary may split
         * the table mid-descriptor. */
        desc = (struct vring_desc *)((char *)desc + read_len);
    }

    return 0;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = le16toh(desc[i].next);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}

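/*
 * Walk the available descriptor chains and report how many
 * device-writable (*in_bytes) and device-readable (*out_bytes) bytes
 * they currently provide. The walk stops early once both max_in_bytes
 * and max_out_bytes are reached; on a malformed ring the device panics
 * and both counts read as zero.
 */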
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (!vu_is_vq_usable(dev, vq)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
            if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = le64toh(desc[i].addr);
            desc_len = le32toh(desc[i].len);
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
                in_total += le32toh(desc[i].len);
            } else {
                out_total += le32toh(desc[i].len);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
    if (rc < 0) {
        goto err;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

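/*
 * Convenience wrapper: true if the queue currently offers at least
 * in_bytes of device-writable and out_bytes of device-readable space.
 */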
bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (!vu_is_vq_usable(dev, vq)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

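/*
 * Notify the front-end that used buffers are available. If no call fd
 * was given and both VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS and
 * VHOST_USER_PROTOCOL_F_BACKEND_REQ were negotiated, the notification
 * is sent in-band as VHOST_USER_BACKEND_VRING_CALL over the backend
 * channel, optionally waiting for a REPLY_ACK when sync is requested;
 * otherwise the call eventfd is signalled.
 */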
static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
    if (!vu_is_vq_usable(dev, vq)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (vq->call_fd < 0 &&
        vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_BACKEND_VRING_CALL,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };
        bool ack = sync &&
                   vu_has_protocol_feature(dev,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);

        if (ack) {
            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
        }

        vu_message_write(dev, dev->backend_fd, &vmsg);
        if (ack) {
            vu_message_read_default(dev, dev->backend_fd, &vmsg);
        }
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}

void vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, false);
}

void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, true);
}

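/*
 * Tell the front-end that the device configuration space changed, via
 * a VHOST_USER_BACKEND_CONFIG_CHANGE_MSG on the backend channel.
 */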
void vu_config_change_msg(VuDev *dev)
{
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG,
        .flags = VHOST_USER_VERSION,
    };

    vu_message_write(dev, dev->backend_fd, &vmsg);
}

static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) | mask);
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) & ~mask);
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    uint16_t val_le = htole16(val);

    if (!vq->notification) {
        return;
    }

    memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t));
}

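/*
 * Enable or disable guest->device notifications for this queue, using
 * whichever mechanism was negotiated: the avail-event index with
 * VIRTIO_RING_F_EVENT_IDX, or the VRING_USED_F_NO_NOTIFY flag
 * otherwise.
 */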
void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static bool
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return false;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return false;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return false;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
    return true;
}

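/*
 * Allocate an element as a single block: the caller's sz bytes (at
 * least a VuVirtqElement header) followed by the in_sg and out_sg
 * iovec arrays, so one free() releases everything.
 */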
static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    if (!elem) {
        DPRINT("%s: failed to malloc virtqueue element\n", __func__);
        return NULL;
    }
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
                                    VIRTQUEUE_MAX_SIZE - out_num, true,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            if (!virtqueue_map_desc(dev, &out_num, iov,
                                    VIRTQUEUE_MAX_SIZE, false,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            return NULL;
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    if (!elem) {
        return NULL;
    }
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}

static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}

static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}

static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    barrier();

    vq->inflight->desc[desc_idx].inflight = 0;

    barrier();

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}

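/*
 * Pop the next available descriptor chain, or NULL if the ring is
 * empty or the queue unusable. Chains pending resubmission after an
 * inflight-tracked reconnect are returned first. sz is the size of the
 * element to allocate and must be at least sizeof(VuVirtqElement);
 * the caller frees the element.
 *
 * Illustrative sketch (not part of the library) of a kick handler for
 * a device that consumes buffers without looking at them:
 *
 *     VuVirtqElement *elem;
 *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *         vu_queue_push(dev, vq, elem, 0);
 *         free(elem);
 *     }
 *     vu_queue_notify(dev, vq);
 */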
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (!vu_is_vq_usable(dev, vq)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}

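/*
 * The helpers below undo a pop: vu_queue_unpop() returns one element
 * to the queue so it is popped again next time, and vu_queue_rewind()
 * does the same for the last num popped elements at once.
 */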
static void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}

void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}

bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}

static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            min = MIN(le32toh(desc[i].len), len);
            vu_log_write(dev, le64toh(desc[i].addr), min);
            len -= min;
        }

    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}

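/*
 * Record a completed element in the used ring at offset idx from the
 * current used index, dirty-logging the written pages when migration
 * logging is enabled. The new used index is not visible to the driver
 * until vu_queue_flush() publishes it.
 */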
void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (!vu_is_vq_usable(dev, vq)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = htole32(elem->index);
    uelem.len = htole32(len);
    vring_used_write(dev, vq, &uelem, idx);
}

static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = htole16(val);
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}

void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (!vu_is_vq_usable(dev, vq)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

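/*
 * Complete a single element: fill the used ring and publish the new
 * used index, bracketing the update with the inflight pre/post hooks
 * so an interrupted operation can be detected after a reconnect.
 */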
void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}