qemu/ar7.git: contrib/libvhost-user/libvhost-user.c
1 /*
2 * Vhost User library
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2016 Red Hat, Inc.
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Marc-André Lureau <mlureau@redhat.com>
10 * Victor Kaplansky <victork@redhat.com>
12 * This work is licensed under the terms of the GNU GPL, version 2 or
13 * later. See the COPYING file in the top-level directory.
14 */
16 /* this code avoids GLib dependency */
17 #include <stdlib.h>
18 #include <stdio.h>
19 #include <unistd.h>
20 #include <stdarg.h>
21 #include <errno.h>
22 #include <string.h>
23 #include <assert.h>
24 #include <inttypes.h>
25 #include <sys/types.h>
26 #include <sys/socket.h>
27 #include <sys/eventfd.h>
28 #include <sys/mman.h>
29 #include "qemu/compiler.h"
31 #if defined(__linux__)
32 #include <sys/syscall.h>
33 #include <fcntl.h>
34 #include <sys/ioctl.h>
35 #include <linux/vhost.h>
37 #ifdef __NR_userfaultfd
38 #include <linux/userfaultfd.h>
39 #endif
41 #endif
43 #include "qemu/atomic.h"
44 #include "qemu/osdep.h"
45 #include "qemu/memfd.h"
47 #include "libvhost-user.h"
49 /* usually provided by GLib */
50 #ifndef MIN
51 #define MIN(x, y) ({ \
52 typeof(x) _min1 = (x); \
53 typeof(y) _min2 = (y); \
54 (void) (&_min1 == &_min2); \
55 _min1 < _min2 ? _min1 : _min2; })
56 #endif
58 /* Round number down to multiple */
59 #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
61 /* Round number up to multiple */
62 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
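/*
 * Editor's note (illustrative, not part of the original file): with these
 * macros, ALIGN_DOWN(17, 8) evaluates to 16 and ALIGN_UP(17, 8) to 24;
 * values already on a multiple are unchanged, e.g. ALIGN_UP(24, 8) == 24.
 */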
64 /* Align each region to cache line size in inflight buffer */
65 #define INFLIGHT_ALIGNMENT 64
67 /* The version of inflight buffer */
68 #define INFLIGHT_VERSION 1
70 #define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
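/*
 * Note (added for clarity): per the vhost-user protocol the fixed message
 * header is request (u32), flags (u32) and payload size (u32), i.e. 12 bytes,
 * so VHOST_USER_HDR_SIZE is the header that precedes the variable-sized
 * payload handled in vu_message_read()/vu_message_write().
 */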
72 /* The version of the protocol we support */
73 #define VHOST_USER_VERSION 1
74 #define LIBVHOST_USER_DEBUG 0
76 #define DPRINT(...) \
77 do { \
78 if (LIBVHOST_USER_DEBUG) { \
79 fprintf(stderr, __VA_ARGS__); \
80 } \
81 } while (0)
83 static inline
84 bool has_feature(uint64_t features, unsigned int fbit)
86 assert(fbit < 64);
87 return !!(features & (1ULL << fbit));
90 static inline
91 bool vu_has_feature(VuDev *dev,
92 unsigned int fbit)
94 return has_feature(dev->features, fbit);
97 static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
99 return has_feature(dev->protocol_features, fbit);
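/*
 * Example (editor's illustration): has_feature(0x30ULL, 4) is true because
 * bit 4 of 0x30 (binary 110000) is set, while has_feature(0x30ULL, 2) is
 * false; vu_has_feature()/vu_has_protocol_feature() apply the same test to
 * the negotiated device and protocol feature masks respectively.
 */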
102 static const char *
103 vu_request_to_string(unsigned int req)
105 #define REQ(req) [req] = #req
106 static const char *vu_request_str[] = {
107 REQ(VHOST_USER_NONE),
108 REQ(VHOST_USER_GET_FEATURES),
109 REQ(VHOST_USER_SET_FEATURES),
110 REQ(VHOST_USER_SET_OWNER),
111 REQ(VHOST_USER_RESET_OWNER),
112 REQ(VHOST_USER_SET_MEM_TABLE),
113 REQ(VHOST_USER_SET_LOG_BASE),
114 REQ(VHOST_USER_SET_LOG_FD),
115 REQ(VHOST_USER_SET_VRING_NUM),
116 REQ(VHOST_USER_SET_VRING_ADDR),
117 REQ(VHOST_USER_SET_VRING_BASE),
118 REQ(VHOST_USER_GET_VRING_BASE),
119 REQ(VHOST_USER_SET_VRING_KICK),
120 REQ(VHOST_USER_SET_VRING_CALL),
121 REQ(VHOST_USER_SET_VRING_ERR),
122 REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
123 REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
124 REQ(VHOST_USER_GET_QUEUE_NUM),
125 REQ(VHOST_USER_SET_VRING_ENABLE),
126 REQ(VHOST_USER_SEND_RARP),
127 REQ(VHOST_USER_NET_SET_MTU),
128 REQ(VHOST_USER_SET_SLAVE_REQ_FD),
129 REQ(VHOST_USER_IOTLB_MSG),
130 REQ(VHOST_USER_SET_VRING_ENDIAN),
131 REQ(VHOST_USER_GET_CONFIG),
132 REQ(VHOST_USER_SET_CONFIG),
133 REQ(VHOST_USER_POSTCOPY_ADVISE),
134 REQ(VHOST_USER_POSTCOPY_LISTEN),
135 REQ(VHOST_USER_POSTCOPY_END),
136 REQ(VHOST_USER_GET_INFLIGHT_FD),
137 REQ(VHOST_USER_SET_INFLIGHT_FD),
138 REQ(VHOST_USER_GPU_SET_SOCKET),
139 REQ(VHOST_USER_MAX),
141 #undef REQ
143 if (req < VHOST_USER_MAX) {
144 return vu_request_str[req];
145 } else {
146 return "unknown";
150 static void GCC_FMT_ATTR(2, 3)
151 vu_panic(VuDev *dev, const char *msg, ...)
153 char *buf = NULL;
154 va_list ap;
156 va_start(ap, msg);
157 if (vasprintf(&buf, msg, ap) < 0) {
158 buf = NULL;
160 va_end(ap);
162 dev->broken = true;
163 dev->panic(dev, buf);
164 free(buf);
166 /* FIXME: find a way to call virtio_error? */
169 /* Translate guest physical address to our virtual address. */
170 void *
171 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
173 int i;
175 if (*plen == 0) {
176 return NULL;
179 /* Find matching memory region. */
180 for (i = 0; i < dev->nregions; i++) {
181 VuDevRegion *r = &dev->regions[i];
183 if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
184 if ((guest_addr + *plen) > (r->gpa + r->size)) {
185 *plen = r->gpa + r->size - guest_addr;
187 return (void *)(uintptr_t)
188 guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
192 return NULL;
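/*
 * Usage sketch (editor's illustration, hypothetical caller): a backend that
 * wants to read 'len' bytes at guest physical address 'gpa' must respect the
 * clamping done through *plen, e.g.:
 *
 *     uint64_t want = len;
 *     void *p = vu_gpa_to_va(dev, &want, gpa);
 *     if (!p) {
 *         // gpa is not covered by any region
 *     } else if (want < len) {
 *         // buffer crosses a region boundary; only 'want' bytes are mapped
 *     }
 */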
195 /* Translate qemu virtual address to our virtual address. */
196 static void *
197 qva_to_va(VuDev *dev, uint64_t qemu_addr)
199 int i;
201 /* Find matching memory region. */
202 for (i = 0; i < dev->nregions; i++) {
203 VuDevRegion *r = &dev->regions[i];
205 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
206 return (void *)(uintptr_t)
207 qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
211 return NULL;
214 static void
215 vmsg_close_fds(VhostUserMsg *vmsg)
217 int i;
219 for (i = 0; i < vmsg->fd_num; i++) {
220 close(vmsg->fds[i]);
224 /* Set reply payload.u64 and clear request flags and fd_num */
225 static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
227 vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
228 vmsg->size = sizeof(vmsg->payload.u64);
229 vmsg->payload.u64 = val;
230 vmsg->fd_num = 0;
233 /* A test to see if we have userfault available */
234 static bool
235 have_userfault(void)
237 #if defined(__linux__) && defined(__NR_userfaultfd) &&\
238 defined(UFFD_FEATURE_MISSING_SHMEM) &&\
239 defined(UFFD_FEATURE_MISSING_HUGETLBFS)
240 /* Now test the kernel we're running on really has the features */
241 int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
242 struct uffdio_api api_struct;
243 if (ufd < 0) {
244 return false;
247 api_struct.api = UFFD_API;
248 api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
249 UFFD_FEATURE_MISSING_HUGETLBFS;
250 if (ioctl(ufd, UFFDIO_API, &api_struct)) {
251 close(ufd);
252 return false;
254 close(ufd);
255 return true;
257 #else
258 return false;
259 #endif
262 static bool
263 vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
265 char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
266 struct iovec iov = {
267 .iov_base = (char *)vmsg,
268 .iov_len = VHOST_USER_HDR_SIZE,
270 struct msghdr msg = {
271 .msg_iov = &iov,
272 .msg_iovlen = 1,
273 .msg_control = control,
274 .msg_controllen = sizeof(control),
276 size_t fd_size;
277 struct cmsghdr *cmsg;
278 int rc;
280 do {
281 rc = recvmsg(conn_fd, &msg, 0);
282 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
284 if (rc < 0) {
285 vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
286 return false;
289 vmsg->fd_num = 0;
290 for (cmsg = CMSG_FIRSTHDR(&msg);
291 cmsg != NULL;
292 cmsg = CMSG_NXTHDR(&msg, cmsg))
294 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
295 fd_size = cmsg->cmsg_len - CMSG_LEN(0);
296 vmsg->fd_num = fd_size / sizeof(int);
297 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
298 break;
302 if (vmsg->size > sizeof(vmsg->payload)) {
303 vu_panic(dev,
304 "Error: too big message request: %d, size: vmsg->size: %u, "
305 "while sizeof(vmsg->payload) = %zu\n",
306 vmsg->request, vmsg->size, sizeof(vmsg->payload));
307 goto fail;
310 if (vmsg->size) {
311 do {
312 rc = read(conn_fd, &vmsg->payload, vmsg->size);
313 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
315 if (rc <= 0) {
316 vu_panic(dev, "Error while reading: %s", strerror(errno));
317 goto fail;
320 assert(rc == vmsg->size);
323 return true;
325 fail:
326 vmsg_close_fds(vmsg);
328 return false;
331 static bool
332 vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
334 int rc;
335 uint8_t *p = (uint8_t *)vmsg;
336 char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
337 struct iovec iov = {
338 .iov_base = (char *)vmsg,
339 .iov_len = VHOST_USER_HDR_SIZE,
341 struct msghdr msg = {
342 .msg_iov = &iov,
343 .msg_iovlen = 1,
344 .msg_control = control,
346 struct cmsghdr *cmsg;
348 memset(control, 0, sizeof(control));
349 assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
350 if (vmsg->fd_num > 0) {
351 size_t fdsize = vmsg->fd_num * sizeof(int);
352 msg.msg_controllen = CMSG_SPACE(fdsize);
353 cmsg = CMSG_FIRSTHDR(&msg);
354 cmsg->cmsg_len = CMSG_LEN(fdsize);
355 cmsg->cmsg_level = SOL_SOCKET;
356 cmsg->cmsg_type = SCM_RIGHTS;
357 memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
358 } else {
359 msg.msg_controllen = 0;
362 do {
363 rc = sendmsg(conn_fd, &msg, 0);
364 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
366 if (vmsg->size) {
367 do {
368 if (vmsg->data) {
369 rc = write(conn_fd, vmsg->data, vmsg->size);
370 } else {
371 rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
373 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
376 if (rc <= 0) {
377 vu_panic(dev, "Error while writing: %s", strerror(errno));
378 return false;
381 return true;
384 static bool
385 vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
387 /* Set the version in the flags when sending the reply */
388 vmsg->flags &= ~VHOST_USER_VERSION_MASK;
389 vmsg->flags |= VHOST_USER_VERSION;
390 vmsg->flags |= VHOST_USER_REPLY_MASK;
392 return vu_message_write(dev, conn_fd, vmsg);
395 static bool
396 vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
398 VhostUserMsg msg_reply;
400 if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
401 return true;
404 if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
405 return false;
408 if (msg_reply.request != vmsg->request) {
409 DPRINT("Received unexpected msg type. Expected %d received %d",
410 vmsg->request, msg_reply.request);
411 return false;
414 return msg_reply.payload.u64 == 0;
417 /* Kick the log_call_fd if required. */
418 static void
419 vu_log_kick(VuDev *dev)
421 if (dev->log_call_fd != -1) {
422 DPRINT("Kicking the QEMU's log...\n");
423 if (eventfd_write(dev->log_call_fd, 1) < 0) {
424 vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
429 static void
430 vu_log_page(uint8_t *log_table, uint64_t page)
432 DPRINT("Logged dirty guest page: %"PRId64"\n", page);
433 atomic_or(&log_table[page / 8], 1 << (page % 8));
436 static void
437 vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
439 uint64_t page;
441 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
442 !dev->log_table || !length) {
443 return;
446 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
448 page = address / VHOST_LOG_PAGE;
449 while (page * VHOST_LOG_PAGE < address + length) {
450 vu_log_page(dev->log_table, page);
451 page += 1;
454 vu_log_kick(dev);
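/*
 * Example (editor's illustration, assuming the usual 4 KiB VHOST_LOG_PAGE):
 * vu_log_write(dev, 0x3000, 0x2000) touches guest pages 3 and 4, so bits 3
 * and 4 of log_table[0] are set via vu_log_page(), after which vu_log_kick()
 * signals QEMU if a log eventfd was supplied.
 */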
457 static void
458 vu_kick_cb(VuDev *dev, int condition, void *data)
460 int index = (intptr_t)data;
461 VuVirtq *vq = &dev->vq[index];
462 int sock = vq->kick_fd;
463 eventfd_t kick_data;
464 ssize_t rc;
466 rc = eventfd_read(sock, &kick_data);
467 if (rc == -1) {
468 vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
469 dev->remove_watch(dev, dev->vq[index].kick_fd);
470 } else {
471 DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
472 kick_data, vq->handler, index);
473 if (vq->handler) {
474 vq->handler(dev, index);
479 static bool
480 vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
482 vmsg->payload.u64 =
483 1ULL << VHOST_F_LOG_ALL |
484 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
486 if (dev->iface->get_features) {
487 vmsg->payload.u64 |= dev->iface->get_features(dev);
490 vmsg->size = sizeof(vmsg->payload.u64);
491 vmsg->fd_num = 0;
493 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
495 return true;
498 static void
499 vu_set_enable_all_rings(VuDev *dev, bool enabled)
501 uint16_t i;
503 for (i = 0; i < dev->max_queues; i++) {
504 dev->vq[i].enable = enabled;
508 static bool
509 vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
511 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
513 dev->features = vmsg->payload.u64;
515 if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
516 vu_set_enable_all_rings(dev, true);
519 if (dev->iface->set_features) {
520 dev->iface->set_features(dev, dev->features);
523 return false;
526 static bool
527 vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
529 return false;
532 static void
533 vu_close_log(VuDev *dev)
535 if (dev->log_table) {
536 if (munmap(dev->log_table, dev->log_size) != 0) {
537 perror("close log munmap() error");
540 dev->log_table = NULL;
542 if (dev->log_call_fd != -1) {
543 close(dev->log_call_fd);
544 dev->log_call_fd = -1;
548 static bool
549 vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
551 vu_set_enable_all_rings(dev, false);
553 return false;
556 static bool
557 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
559 int i;
560 VhostUserMemory m = vmsg->payload.memory, *memory = &m;
561 dev->nregions = memory->nregions;
563 DPRINT("Nregions: %d\n", memory->nregions);
564 for (i = 0; i < dev->nregions; i++) {
565 void *mmap_addr;
566 VhostUserMemoryRegion *msg_region = &memory->regions[i];
567 VuDevRegion *dev_region = &dev->regions[i];
569 DPRINT("Region %d\n", i);
570 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
571 msg_region->guest_phys_addr);
572 DPRINT(" memory_size: 0x%016"PRIx64"\n",
573 msg_region->memory_size);
574 DPRINT(" userspace_addr 0x%016"PRIx64"\n",
575 msg_region->userspace_addr);
576 DPRINT(" mmap_offset 0x%016"PRIx64"\n",
577 msg_region->mmap_offset);
579 dev_region->gpa = msg_region->guest_phys_addr;
580 dev_region->size = msg_region->memory_size;
581 dev_region->qva = msg_region->userspace_addr;
582 dev_region->mmap_offset = msg_region->mmap_offset;
584 /* We don't use offset argument of mmap() since the
585 * mapped address has to be page aligned, and we use huge
586 * pages.
587 * In postcopy we're using PROT_NONE here to catch anyone
588 * accessing it before we userfault
589 */
590 mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
591 PROT_NONE, MAP_SHARED,
592 vmsg->fds[i], 0);
594 if (mmap_addr == MAP_FAILED) {
595 vu_panic(dev, "region mmap error: %s", strerror(errno));
596 } else {
597 dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
598 DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
599 dev_region->mmap_addr);
602 /* Return the address to QEMU so that it can translate the ufd
603 * fault addresses back.
604 */
605 msg_region->userspace_addr = (uintptr_t)(mmap_addr +
606 dev_region->mmap_offset);
607 close(vmsg->fds[i]);
610 /* Send the message back to qemu with the addresses filled in */
611 vmsg->fd_num = 0;
612 if (!vu_send_reply(dev, dev->sock, vmsg)) {
613 vu_panic(dev, "failed to respond to set-mem-table for postcopy");
614 return false;
617 /* Wait for QEMU to confirm that it's registered the handler for the
618 * faults.
619 */
620 if (!vu_message_read(dev, dev->sock, vmsg) ||
621 vmsg->size != sizeof(vmsg->payload.u64) ||
622 vmsg->payload.u64 != 0) {
623 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
624 return false;
627 /* OK, now we can go and register the memory and generate faults */
628 for (i = 0; i < dev->nregions; i++) {
629 VuDevRegion *dev_region = &dev->regions[i];
630 int ret;
631 #ifdef UFFDIO_REGISTER
632 /* We should already have an open ufd. Mark each memory
633 * range as ufd.
634 * Discard any mapping we have here; note I can't use MADV_REMOVE
635 * or fallocate to make the hole since I don't want to lose
636 * data that's already arrived in the shared process.
637 * TODO: How to do this for hugepages
638 */
639 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
640 dev_region->size + dev_region->mmap_offset,
641 MADV_DONTNEED);
642 if (ret) {
643 fprintf(stderr,
644 "%s: Failed to madvise(DONTNEED) region %d: %s\n",
645 __func__, i, strerror(errno));
647 /* Turn off transparent hugepages so we don't lose wakeups
648 * in neighbouring pages.
649 * TODO: Turn this back on later.
650 */
651 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
652 dev_region->size + dev_region->mmap_offset,
653 MADV_NOHUGEPAGE);
654 if (ret) {
655 /* Note: This can happen legally on kernels that are configured
656 * without madvise'able hugepages
657 */
658 fprintf(stderr,
659 "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
660 __func__, i, strerror(errno));
662 struct uffdio_register reg_struct;
663 reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
664 reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
665 reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
667 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
668 vu_panic(dev, "%s: Failed to userfault region %d "
669 "@%" PRIx64 " + size:%" PRIx64
670 " offset: %" PRIx64 ": (ufd=%d)%s\n",
671 __func__, i,
672 dev_region->mmap_addr,
673 dev_region->size, dev_region->mmap_offset,
674 dev->postcopy_ufd, strerror(errno));
675 return false;
677 if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
678 vu_panic(dev, "%s Region (%d) doesn't support COPY",
679 __func__, i);
680 return false;
682 DPRINT("%s: region %d: Registered userfault for %"
683 PRIx64 " + %" PRIx64 "\n", __func__, i,
684 (uint64_t)reg_struct.range.start,
685 (uint64_t)reg_struct.range.len);
686 /* Now it's registered we can let the client at it */
687 if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
688 dev_region->size + dev_region->mmap_offset,
689 PROT_READ | PROT_WRITE)) {
690 vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
691 i, strerror(errno));
692 return false;
694 /* TODO: Stash 'zero' support flags somewhere */
695 #endif
698 return false;
701 static bool
702 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
704 int i;
705 VhostUserMemory m = vmsg->payload.memory, *memory = &m;
707 for (i = 0; i < dev->nregions; i++) {
708 VuDevRegion *r = &dev->regions[i];
709 void *m = (void *) (uintptr_t) r->mmap_addr;
711 if (m) {
712 munmap(m, r->size + r->mmap_offset);
715 dev->nregions = memory->nregions;
717 if (dev->postcopy_listening) {
718 return vu_set_mem_table_exec_postcopy(dev, vmsg);
721 DPRINT("Nregions: %d\n", memory->nregions);
722 for (i = 0; i < dev->nregions; i++) {
723 void *mmap_addr;
724 VhostUserMemoryRegion *msg_region = &memory->regions[i];
725 VuDevRegion *dev_region = &dev->regions[i];
727 DPRINT("Region %d\n", i);
728 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
729 msg_region->guest_phys_addr);
730 DPRINT(" memory_size: 0x%016"PRIx64"\n",
731 msg_region->memory_size);
732 DPRINT(" userspace_addr 0x%016"PRIx64"\n",
733 msg_region->userspace_addr);
734 DPRINT(" mmap_offset 0x%016"PRIx64"\n",
735 msg_region->mmap_offset);
737 dev_region->gpa = msg_region->guest_phys_addr;
738 dev_region->size = msg_region->memory_size;
739 dev_region->qva = msg_region->userspace_addr;
740 dev_region->mmap_offset = msg_region->mmap_offset;
742 /* We don't use offset argument of mmap() since the
743 * mapped address has to be page aligned, and we use huge
744 * pages. */
745 mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
746 PROT_READ | PROT_WRITE, MAP_SHARED,
747 vmsg->fds[i], 0);
749 if (mmap_addr == MAP_FAILED) {
750 vu_panic(dev, "region mmap error: %s", strerror(errno));
751 } else {
752 dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
753 DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
754 dev_region->mmap_addr);
757 close(vmsg->fds[i]);
760 return false;
763 static bool
764 vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
766 int fd;
767 uint64_t log_mmap_size, log_mmap_offset;
768 void *rc;
770 if (vmsg->fd_num != 1 ||
771 vmsg->size != sizeof(vmsg->payload.log)) {
772 vu_panic(dev, "Invalid log_base message");
773 return true;
776 fd = vmsg->fds[0];
777 log_mmap_offset = vmsg->payload.log.mmap_offset;
778 log_mmap_size = vmsg->payload.log.mmap_size;
779 DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
780 DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size);
782 rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
783 log_mmap_offset);
784 close(fd);
785 if (rc == MAP_FAILED) {
786 perror("log mmap error");
789 if (dev->log_table) {
790 munmap(dev->log_table, dev->log_size);
792 dev->log_table = rc;
793 dev->log_size = log_mmap_size;
795 vmsg->size = sizeof(vmsg->payload.u64);
796 vmsg->fd_num = 0;
798 return true;
801 static bool
802 vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
804 if (vmsg->fd_num != 1) {
805 vu_panic(dev, "Invalid log_fd message");
806 return false;
809 if (dev->log_call_fd != -1) {
810 close(dev->log_call_fd);
812 dev->log_call_fd = vmsg->fds[0];
813 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
815 return false;
818 static bool
819 vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
821 unsigned int index = vmsg->payload.state.index;
822 unsigned int num = vmsg->payload.state.num;
824 DPRINT("State.index: %d\n", index);
825 DPRINT("State.num: %d\n", num);
826 dev->vq[index].vring.num = num;
828 return false;
831 static bool
832 vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
834 struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
835 unsigned int index = vra->index;
836 VuVirtq *vq = &dev->vq[index];
838 DPRINT("vhost_vring_addr:\n");
839 DPRINT(" index: %d\n", vra->index);
840 DPRINT(" flags: %d\n", vra->flags);
841 DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n", vra->desc_user_addr);
842 DPRINT(" used_user_addr: 0x%016" PRIx64 "\n", vra->used_user_addr);
843 DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", vra->avail_user_addr);
844 DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", vra->log_guest_addr);
846 vq->vring.flags = vra->flags;
847 vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
848 vq->vring.used = qva_to_va(dev, vra->used_user_addr);
849 vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
850 vq->vring.log_guest_addr = vra->log_guest_addr;
852 DPRINT("Setting virtq addresses:\n");
853 DPRINT(" vring_desc at %p\n", vq->vring.desc);
854 DPRINT(" vring_used at %p\n", vq->vring.used);
855 DPRINT(" vring_avail at %p\n", vq->vring.avail);
857 if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
858 vu_panic(dev, "Invalid vring_addr message");
859 return false;
862 vq->used_idx = vq->vring.used->idx;
864 if (vq->last_avail_idx != vq->used_idx) {
865 bool resume = dev->iface->queue_is_processed_in_order &&
866 dev->iface->queue_is_processed_in_order(dev, index);
868 DPRINT("Last avail index != used index: %u != %u%s\n",
869 vq->last_avail_idx, vq->used_idx,
870 resume ? ", resuming" : "");
872 if (resume) {
873 vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
877 return false;
880 static bool
881 vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
883 unsigned int index = vmsg->payload.state.index;
884 unsigned int num = vmsg->payload.state.num;
886 DPRINT("State.index: %d\n", index);
887 DPRINT("State.num: %d\n", num);
888 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;
890 return false;
893 static bool
894 vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
896 unsigned int index = vmsg->payload.state.index;
898 DPRINT("State.index: %d\n", index);
899 vmsg->payload.state.num = dev->vq[index].last_avail_idx;
900 vmsg->size = sizeof(vmsg->payload.state);
902 dev->vq[index].started = false;
903 if (dev->iface->queue_set_started) {
904 dev->iface->queue_set_started(dev, index, false);
907 if (dev->vq[index].call_fd != -1) {
908 close(dev->vq[index].call_fd);
909 dev->vq[index].call_fd = -1;
911 if (dev->vq[index].kick_fd != -1) {
912 dev->remove_watch(dev, dev->vq[index].kick_fd);
913 close(dev->vq[index].kick_fd);
914 dev->vq[index].kick_fd = -1;
917 return true;
920 static bool
921 vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
923 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
925 if (index >= dev->max_queues) {
926 vmsg_close_fds(vmsg);
927 vu_panic(dev, "Invalid queue index: %u", index);
928 return false;
931 if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
932 vmsg->fd_num != 1) {
933 vmsg_close_fds(vmsg);
934 vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
935 return false;
938 return true;
941 static int
942 inflight_desc_compare(const void *a, const void *b)
944 VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
945 *desc1 = (VuVirtqInflightDesc *)b;
947 if (desc1->counter > desc0->counter &&
948 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
949 return 1;
952 return -1;
955 static int
956 vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
958 int i = 0;
960 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
961 return 0;
964 if (unlikely(!vq->inflight)) {
965 return -1;
968 if (unlikely(!vq->inflight->version)) {
969 /* initialize the buffer */
970 vq->inflight->version = INFLIGHT_VERSION;
971 return 0;
974 vq->used_idx = vq->vring.used->idx;
975 vq->resubmit_num = 0;
976 vq->resubmit_list = NULL;
977 vq->counter = 0;
979 if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
980 vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;
982 barrier();
984 vq->inflight->used_idx = vq->used_idx;
987 for (i = 0; i < vq->inflight->desc_num; i++) {
988 if (vq->inflight->desc[i].inflight == 1) {
989 vq->inuse++;
993 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;
995 if (vq->inuse) {
996 vq->resubmit_list = malloc(sizeof(VuVirtqInflightDesc) * vq->inuse);
997 if (!vq->resubmit_list) {
998 return -1;
1001 for (i = 0; i < vq->inflight->desc_num; i++) {
1002 if (vq->inflight->desc[i].inflight) {
1003 vq->resubmit_list[vq->resubmit_num].index = i;
1004 vq->resubmit_list[vq->resubmit_num].counter =
1005 vq->inflight->desc[i].counter;
1006 vq->resubmit_num++;
1010 if (vq->resubmit_num > 1) {
1011 qsort(vq->resubmit_list, vq->resubmit_num,
1012 sizeof(VuVirtqInflightDesc), inflight_desc_compare);
1014 vq->counter = vq->resubmit_list[0].counter + 1;
1017 /* in case of I/O hang after reconnecting */
1018 if (eventfd_write(vq->kick_fd, 1)) {
1019 return -1;
1022 return 0;
1025 static bool
1026 vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
1028 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1030 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1032 if (!vu_check_queue_msg_file(dev, vmsg)) {
1033 return false;
1036 if (dev->vq[index].kick_fd != -1) {
1037 dev->remove_watch(dev, dev->vq[index].kick_fd);
1038 close(dev->vq[index].kick_fd);
1039 dev->vq[index].kick_fd = -1;
1042 dev->vq[index].kick_fd = vmsg->fds[0];
1043 DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
1045 dev->vq[index].started = true;
1046 if (dev->iface->queue_set_started) {
1047 dev->iface->queue_set_started(dev, index, true);
1050 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
1051 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
1052 vu_kick_cb, (void *)(long)index);
1054 DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
1055 dev->vq[index].kick_fd, index);
1058 if (vu_check_queue_inflights(dev, &dev->vq[index])) {
1059 vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
1062 return false;
1065 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
1066 vu_queue_handler_cb handler)
1068 int qidx = vq - dev->vq;
1070 vq->handler = handler;
1071 if (vq->kick_fd >= 0) {
1072 if (handler) {
1073 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
1074 vu_kick_cb, (void *)(long)qidx);
1075 } else {
1076 dev->remove_watch(dev, vq->kick_fd);
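/*
 * Usage sketch (editor's illustration; my_queue_handler is a hypothetical
 * application function):
 *
 *     static void my_queue_handler(VuDev *dev, int qidx);
 *     ...
 *     vu_set_queue_handler(dev, vu_get_queue(dev, 0), my_queue_handler);
 *
 * The handler is then invoked from vu_kick_cb() whenever the guest kicks
 * queue 0; passing NULL removes the kick-fd watch again.
 */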
1081 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
1082 int size, int offset)
1084 int qidx = vq - dev->vq;
1085 int fd_num = 0;
1086 VhostUserMsg vmsg = {
1087 .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
1088 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1089 .size = sizeof(vmsg.payload.area),
1090 .payload.area = {
1091 .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
1092 .size = size,
1093 .offset = offset,
1097 if (fd == -1) {
1098 vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
1099 } else {
1100 vmsg.fds[fd_num++] = fd;
1103 vmsg.fd_num = fd_num;
1105 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
1106 return false;
1109 if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
1110 return false;
1113 return vu_process_message_reply(dev, &vmsg);
1116 static bool
1117 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
1119 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1121 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1123 if (!vu_check_queue_msg_file(dev, vmsg)) {
1124 return false;
1127 if (dev->vq[index].call_fd != -1) {
1128 close(dev->vq[index].call_fd);
1129 dev->vq[index].call_fd = -1;
1132 dev->vq[index].call_fd = vmsg->fds[0];
1134 /* in case of I/O hang after reconnecting */
1135 if (eventfd_write(vmsg->fds[0], 1)) {
1136 return -1;
1139 DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);
1141 return false;
1144 static bool
1145 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
1147 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1149 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1151 if (!vu_check_queue_msg_file(dev, vmsg)) {
1152 return false;
1155 if (dev->vq[index].err_fd != -1) {
1156 close(dev->vq[index].err_fd);
1157 dev->vq[index].err_fd = -1;
1160 dev->vq[index].err_fd = vmsg->fds[0];
1162 return false;
1165 static bool
1166 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1168 uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
1169 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1170 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
1171 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1172 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;
1174 if (have_userfault()) {
1175 features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
1178 if (dev->iface->get_config && dev->iface->set_config) {
1179 features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
1182 if (dev->iface->get_protocol_features) {
1183 features |= dev->iface->get_protocol_features(dev);
1186 vmsg_set_reply_u64(vmsg, features);
1187 return true;
1190 static bool
1191 vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1193 uint64_t features = vmsg->payload.u64;
1195 DPRINT("u64: 0x%016"PRIx64"\n", features);
1197 dev->protocol_features = vmsg->payload.u64;
1199 if (dev->iface->set_protocol_features) {
1200 dev->iface->set_protocol_features(dev, features);
1203 return false;
1206 static bool
1207 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
1209 vmsg_set_reply_u64(vmsg, dev->max_queues);
1210 return true;
1213 static bool
1214 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
1216 unsigned int index = vmsg->payload.state.index;
1217 unsigned int enable = vmsg->payload.state.num;
1219 DPRINT("State.index: %d\n", index);
1220 DPRINT("State.enable: %d\n", enable);
1222 if (index >= dev->max_queues) {
1223 vu_panic(dev, "Invalid vring_enable index: %u", index);
1224 return false;
1227 dev->vq[index].enable = enable;
1228 return false;
1231 static bool
1232 vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
1234 if (vmsg->fd_num != 1) {
1235 vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
1236 return false;
1239 if (dev->slave_fd != -1) {
1240 close(dev->slave_fd);
1242 dev->slave_fd = vmsg->fds[0];
1243 DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
1245 return false;
1248 static bool
1249 vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
1251 int ret = -1;
1253 if (dev->iface->get_config) {
1254 ret = dev->iface->get_config(dev, vmsg->payload.config.region,
1255 vmsg->payload.config.size);
1258 if (ret) {
1259 /* resize to zero to indicate an error to master */
1260 vmsg->size = 0;
1263 return true;
1266 static bool
1267 vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
1269 int ret = -1;
1271 if (dev->iface->set_config) {
1272 ret = dev->iface->set_config(dev, vmsg->payload.config.region,
1273 vmsg->payload.config.offset,
1274 vmsg->payload.config.size,
1275 vmsg->payload.config.flags);
1276 if (ret) {
1277 vu_panic(dev, "Set virtio configuration space failed");
1281 return false;
1284 static bool
1285 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
1287 dev->postcopy_ufd = -1;
1288 #ifdef UFFDIO_API
1289 struct uffdio_api api_struct;
1291 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1292 vmsg->size = 0;
1293 #endif
1295 if (dev->postcopy_ufd == -1) {
1296 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
1297 goto out;
1300 #ifdef UFFDIO_API
1301 api_struct.api = UFFD_API;
1302 api_struct.features = 0;
1303 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
1304 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
1305 close(dev->postcopy_ufd);
1306 dev->postcopy_ufd = -1;
1307 goto out;
1309 /* TODO: Stash feature flags somewhere */
1310 #endif
1312 out:
1313 /* Return a ufd to the QEMU */
1314 vmsg->fd_num = 1;
1315 vmsg->fds[0] = dev->postcopy_ufd;
1316 return true; /* = send a reply */
1319 static bool
1320 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
1322 if (dev->nregions) {
1323 vu_panic(dev, "Regions already registered at postcopy-listen");
1324 vmsg_set_reply_u64(vmsg, -1);
1325 return true;
1327 dev->postcopy_listening = true;
1329 vmsg_set_reply_u64(vmsg, 0);
1330 return true;
1333 static bool
1334 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
1336 DPRINT("%s: Entry\n", __func__);
1337 dev->postcopy_listening = false;
1338 if (dev->postcopy_ufd > 0) {
1339 close(dev->postcopy_ufd);
1340 dev->postcopy_ufd = -1;
1341 DPRINT("%s: Done close\n", __func__);
1344 vmsg_set_reply_u64(vmsg, 0);
1345 DPRINT("%s: exit\n", __func__);
1346 return true;
1349 static inline uint64_t
1350 vu_inflight_queue_size(uint16_t queue_size)
1352 return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
1353 sizeof(uint16_t), INFLIGHT_ALIGNMENT);
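/*
 * Example (editor's illustration, assuming sizeof(VuDescStateSplit) == 16):
 * for queue_size == 256 the per-queue inflight region is
 * ALIGN_UP(256 * 16 + 2, 64) == ALIGN_UP(4098, 64) == 4160 bytes, and
 * vu_get_inflight_fd() multiplies this by num_queues to size the memfd.
 */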
1356 static bool
1357 vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1359 int fd;
1360 void *addr;
1361 uint64_t mmap_size;
1362 uint16_t num_queues, queue_size;
1364 if (vmsg->size != sizeof(vmsg->payload.inflight)) {
1365 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
1366 vmsg->payload.inflight.mmap_size = 0;
1367 return true;
1370 num_queues = vmsg->payload.inflight.num_queues;
1371 queue_size = vmsg->payload.inflight.queue_size;
1373 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1374 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1376 mmap_size = vu_inflight_queue_size(queue_size) * num_queues;
1378 addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
1379 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1380 &fd, NULL);
1382 if (!addr) {
1383 vu_panic(dev, "Failed to alloc vhost inflight area");
1384 vmsg->payload.inflight.mmap_size = 0;
1385 return true;
1388 memset(addr, 0, mmap_size);
1390 dev->inflight_info.addr = addr;
1391 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
1392 dev->inflight_info.fd = vmsg->fds[0] = fd;
1393 vmsg->fd_num = 1;
1394 vmsg->payload.inflight.mmap_offset = 0;
1396 DPRINT("send inflight mmap_size: %"PRId64"\n",
1397 vmsg->payload.inflight.mmap_size);
1398 DPRINT("send inflight mmap offset: %"PRId64"\n",
1399 vmsg->payload.inflight.mmap_offset);
1401 return true;
1404 static bool
1405 vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1407 int fd, i;
1408 uint64_t mmap_size, mmap_offset;
1409 uint16_t num_queues, queue_size;
1410 void *rc;
1412 if (vmsg->fd_num != 1 ||
1413 vmsg->size != sizeof(vmsg->payload.inflight)) {
1414 vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
1415 vmsg->size, vmsg->fd_num);
1416 return false;
1419 fd = vmsg->fds[0];
1420 mmap_size = vmsg->payload.inflight.mmap_size;
1421 mmap_offset = vmsg->payload.inflight.mmap_offset;
1422 num_queues = vmsg->payload.inflight.num_queues;
1423 queue_size = vmsg->payload.inflight.queue_size;
1425 DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
1426 DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
1427 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1428 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1430 rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1431 fd, mmap_offset);
1433 if (rc == MAP_FAILED) {
1434 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
1435 return false;
1438 if (dev->inflight_info.fd) {
1439 close(dev->inflight_info.fd);
1442 if (dev->inflight_info.addr) {
1443 munmap(dev->inflight_info.addr, dev->inflight_info.size);
1446 dev->inflight_info.fd = fd;
1447 dev->inflight_info.addr = rc;
1448 dev->inflight_info.size = mmap_size;
1450 for (i = 0; i < num_queues; i++) {
1451 dev->vq[i].inflight = (VuVirtqInflight *)rc;
1452 dev->vq[i].inflight->desc_num = queue_size;
1453 rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
1456 return false;
1459 static bool
1460 vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
1462 int do_reply = 0;
1464 /* Print out generic part of the request. */
1465 DPRINT("================ Vhost user message ================\n");
1466 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
1467 vmsg->request);
1468 DPRINT("Flags: 0x%x\n", vmsg->flags);
1469 DPRINT("Size: %d\n", vmsg->size);
1471 if (vmsg->fd_num) {
1472 int i;
1473 DPRINT("Fds:");
1474 for (i = 0; i < vmsg->fd_num; i++) {
1475 DPRINT(" %d", vmsg->fds[i]);
1477 DPRINT("\n");
1480 if (dev->iface->process_msg &&
1481 dev->iface->process_msg(dev, vmsg, &do_reply)) {
1482 return do_reply;
1485 switch (vmsg->request) {
1486 case VHOST_USER_GET_FEATURES:
1487 return vu_get_features_exec(dev, vmsg);
1488 case VHOST_USER_SET_FEATURES:
1489 return vu_set_features_exec(dev, vmsg);
1490 case VHOST_USER_GET_PROTOCOL_FEATURES:
1491 return vu_get_protocol_features_exec(dev, vmsg);
1492 case VHOST_USER_SET_PROTOCOL_FEATURES:
1493 return vu_set_protocol_features_exec(dev, vmsg);
1494 case VHOST_USER_SET_OWNER:
1495 return vu_set_owner_exec(dev, vmsg);
1496 case VHOST_USER_RESET_OWNER:
1497 return vu_reset_device_exec(dev, vmsg);
1498 case VHOST_USER_SET_MEM_TABLE:
1499 return vu_set_mem_table_exec(dev, vmsg);
1500 case VHOST_USER_SET_LOG_BASE:
1501 return vu_set_log_base_exec(dev, vmsg);
1502 case VHOST_USER_SET_LOG_FD:
1503 return vu_set_log_fd_exec(dev, vmsg);
1504 case VHOST_USER_SET_VRING_NUM:
1505 return vu_set_vring_num_exec(dev, vmsg);
1506 case VHOST_USER_SET_VRING_ADDR:
1507 return vu_set_vring_addr_exec(dev, vmsg);
1508 case VHOST_USER_SET_VRING_BASE:
1509 return vu_set_vring_base_exec(dev, vmsg);
1510 case VHOST_USER_GET_VRING_BASE:
1511 return vu_get_vring_base_exec(dev, vmsg);
1512 case VHOST_USER_SET_VRING_KICK:
1513 return vu_set_vring_kick_exec(dev, vmsg);
1514 case VHOST_USER_SET_VRING_CALL:
1515 return vu_set_vring_call_exec(dev, vmsg);
1516 case VHOST_USER_SET_VRING_ERR:
1517 return vu_set_vring_err_exec(dev, vmsg);
1518 case VHOST_USER_GET_QUEUE_NUM:
1519 return vu_get_queue_num_exec(dev, vmsg);
1520 case VHOST_USER_SET_VRING_ENABLE:
1521 return vu_set_vring_enable_exec(dev, vmsg);
1522 case VHOST_USER_SET_SLAVE_REQ_FD:
1523 return vu_set_slave_req_fd(dev, vmsg);
1524 case VHOST_USER_GET_CONFIG:
1525 return vu_get_config(dev, vmsg);
1526 case VHOST_USER_SET_CONFIG:
1527 return vu_set_config(dev, vmsg);
1528 case VHOST_USER_NONE:
1529 /* if you need processing before exit, override iface->process_msg */
1530 exit(0);
1531 case VHOST_USER_POSTCOPY_ADVISE:
1532 return vu_set_postcopy_advise(dev, vmsg);
1533 case VHOST_USER_POSTCOPY_LISTEN:
1534 return vu_set_postcopy_listen(dev, vmsg);
1535 case VHOST_USER_POSTCOPY_END:
1536 return vu_set_postcopy_end(dev, vmsg);
1537 case VHOST_USER_GET_INFLIGHT_FD:
1538 return vu_get_inflight_fd(dev, vmsg);
1539 case VHOST_USER_SET_INFLIGHT_FD:
1540 return vu_set_inflight_fd(dev, vmsg);
1541 default:
1542 vmsg_close_fds(vmsg);
1543 vu_panic(dev, "Unhandled request: %d", vmsg->request);
1546 return false;
1549 bool
1550 vu_dispatch(VuDev *dev)
1552 VhostUserMsg vmsg = { 0, };
1553 int reply_requested;
1554 bool success = false;
1556 if (!vu_message_read(dev, dev->sock, &vmsg)) {
1557 goto end;
1560 reply_requested = vu_process_message(dev, &vmsg);
1561 if (!reply_requested) {
1562 success = true;
1563 goto end;
1566 if (!vu_send_reply(dev, dev->sock, &vmsg)) {
1567 goto end;
1570 success = true;
1572 end:
1573 free(vmsg.data);
1574 return success;
1577 void
1578 vu_deinit(VuDev *dev)
1580 int i;
1582 for (i = 0; i < dev->nregions; i++) {
1583 VuDevRegion *r = &dev->regions[i];
1584 void *m = (void *) (uintptr_t) r->mmap_addr;
1585 if (m != MAP_FAILED) {
1586 munmap(m, r->size + r->mmap_offset);
1589 dev->nregions = 0;
1591 for (i = 0; i < dev->max_queues; i++) {
1592 VuVirtq *vq = &dev->vq[i];
1594 if (vq->call_fd != -1) {
1595 close(vq->call_fd);
1596 vq->call_fd = -1;
1599 if (vq->kick_fd != -1) {
1600 close(vq->kick_fd);
1601 vq->kick_fd = -1;
1604 if (vq->err_fd != -1) {
1605 close(vq->err_fd);
1606 vq->err_fd = -1;
1609 if (vq->resubmit_list) {
1610 free(vq->resubmit_list);
1611 vq->resubmit_list = NULL;
1614 vq->inflight = NULL;
1617 if (dev->inflight_info.addr) {
1618 munmap(dev->inflight_info.addr, dev->inflight_info.size);
1619 dev->inflight_info.addr = NULL;
1622 if (dev->inflight_info.fd > 0) {
1623 close(dev->inflight_info.fd);
1624 dev->inflight_info.fd = -1;
1627 vu_close_log(dev);
1628 if (dev->slave_fd != -1) {
1629 close(dev->slave_fd);
1630 dev->slave_fd = -1;
1633 if (dev->sock != -1) {
1634 close(dev->sock);
1637 free(dev->vq);
1638 dev->vq = NULL;
1641 bool
1642 vu_init(VuDev *dev,
1643 uint16_t max_queues,
1644 int socket,
1645 vu_panic_cb panic,
1646 vu_set_watch_cb set_watch,
1647 vu_remove_watch_cb remove_watch,
1648 const VuDevIface *iface)
1650 uint16_t i;
1652 assert(max_queues > 0);
1653 assert(socket >= 0);
1654 assert(set_watch);
1655 assert(remove_watch);
1656 assert(iface);
1657 assert(panic);
1659 memset(dev, 0, sizeof(*dev));
1661 dev->sock = socket;
1662 dev->panic = panic;
1663 dev->set_watch = set_watch;
1664 dev->remove_watch = remove_watch;
1665 dev->iface = iface;
1666 dev->log_call_fd = -1;
1667 dev->slave_fd = -1;
1668 dev->max_queues = max_queues;
1670 dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
1671 if (!dev->vq) {
1672 DPRINT("%s: failed to malloc virtqueues\n", __func__);
1673 return false;
1676 for (i = 0; i < max_queues; i++) {
1677 dev->vq[i] = (VuVirtq) {
1678 .call_fd = -1, .kick_fd = -1, .err_fd = -1,
1679 .notification = true,
1683 return true;
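/*
 * Usage sketch (editor's illustration): a minimal backend wires vu_init()
 * into its own event loop; panic_cb, set_watch_cb, remove_watch_cb, listen_fd
 * and my_iface below are hypothetical application names, not part of this
 * library:
 *
 *     VuDev dev;
 *     int sock = accept(listen_fd, NULL, NULL);
 *     if (!vu_init(&dev, 1, sock, panic_cb, set_watch_cb, remove_watch_cb,
 *                  &my_iface)) {
 *         // allocation failed
 *     }
 *     // then call vu_dispatch(&dev) whenever 'sock' becomes readable
 */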
1686 VuVirtq *
1687 vu_get_queue(VuDev *dev, int qidx)
1689 assert(qidx < dev->max_queues);
1690 return &dev->vq[qidx];
1693 bool
1694 vu_queue_enabled(VuDev *dev, VuVirtq *vq)
1696 return vq->enable;
1699 bool
1700 vu_queue_started(const VuDev *dev, const VuVirtq *vq)
1702 return vq->started;
1705 static inline uint16_t
1706 vring_avail_flags(VuVirtq *vq)
1708 return vq->vring.avail->flags;
1711 static inline uint16_t
1712 vring_avail_idx(VuVirtq *vq)
1714 vq->shadow_avail_idx = vq->vring.avail->idx;
1716 return vq->shadow_avail_idx;
1719 static inline uint16_t
1720 vring_avail_ring(VuVirtq *vq, int i)
1722 return vq->vring.avail->ring[i];
1725 static inline uint16_t
1726 vring_get_used_event(VuVirtq *vq)
1728 return vring_avail_ring(vq, vq->vring.num);
1731 static int
1732 virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
1734 uint16_t num_heads = vring_avail_idx(vq) - idx;
1736 /* Check it isn't doing very strange things with descriptor numbers. */
1737 if (num_heads > vq->vring.num) {
1738 vu_panic(dev, "Guest moved used index from %u to %u",
1739 idx, vq->shadow_avail_idx);
1740 return -1;
1742 if (num_heads) {
1743 /* On success, callers read a descriptor at vq->last_avail_idx.
1744 * Make sure descriptor read does not bypass avail index read. */
1745 smp_rmb();
1748 return num_heads;
1751 static bool
1752 virtqueue_get_head(VuDev *dev, VuVirtq *vq,
1753 unsigned int idx, unsigned int *head)
1755 /* Grab the next descriptor number they're advertising, and increment
1756 * the index we've seen. */
1757 *head = vring_avail_ring(vq, idx % vq->vring.num);
1759 /* If their number is silly, that's a fatal mistake. */
1760 if (*head >= vq->vring.num) {
1761 vu_panic(dev, "Guest says index %u is available", *head);
1762 return false;
1765 return true;
1768 static int
1769 virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
1770 uint64_t addr, size_t len)
1772 struct vring_desc *ori_desc;
1773 uint64_t read_len;
1775 if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
1776 return -1;
1779 if (len == 0) {
1780 return -1;
1783 while (len) {
1784 read_len = len;
1785 ori_desc = vu_gpa_to_va(dev, &read_len, addr);
1786 if (!ori_desc) {
1787 return -1;
1790 memcpy(desc, ori_desc, read_len);
1791 len -= read_len;
1792 addr += read_len;
1793 desc += read_len;
1796 return 0;
1799 enum {
1800 VIRTQUEUE_READ_DESC_ERROR = -1,
1801 VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
1802 VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
1805 static int
1806 virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
1807 int i, unsigned int max, unsigned int *next)
1809 /* If this descriptor says it doesn't chain, we're done. */
1810 if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
1811 return VIRTQUEUE_READ_DESC_DONE;
1814 /* Check they're not leading us off end of descriptors. */
1815 *next = desc[i].next;
1816 /* Make sure compiler knows to grab that: we don't want it changing! */
1817 smp_wmb();
1819 if (*next >= max) {
1820 vu_panic(dev, "Desc next is %u", *next);
1821 return VIRTQUEUE_READ_DESC_ERROR;
1824 return VIRTQUEUE_READ_DESC_MORE;
1827 void
1828 vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
1829 unsigned int *out_bytes,
1830 unsigned max_in_bytes, unsigned max_out_bytes)
1832 unsigned int idx;
1833 unsigned int total_bufs, in_total, out_total;
1834 int rc;
1836 idx = vq->last_avail_idx;
1838 total_bufs = in_total = out_total = 0;
1839 if (unlikely(dev->broken) ||
1840 unlikely(!vq->vring.avail)) {
1841 goto done;
1844 while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
1845 unsigned int max, desc_len, num_bufs, indirect = 0;
1846 uint64_t desc_addr, read_len;
1847 struct vring_desc *desc;
1848 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
1849 unsigned int i;
1851 max = vq->vring.num;
1852 num_bufs = total_bufs;
1853 if (!virtqueue_get_head(dev, vq, idx++, &i)) {
1854 goto err;
1856 desc = vq->vring.desc;
1858 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
1859 if (desc[i].len % sizeof(struct vring_desc)) {
1860 vu_panic(dev, "Invalid size for indirect buffer table");
1861 goto err;
1864 /* If we've got too many, that implies a descriptor loop. */
1865 if (num_bufs >= max) {
1866 vu_panic(dev, "Looped descriptor");
1867 goto err;
1870 /* loop over the indirect descriptor table */
1871 indirect = 1;
1872 desc_addr = desc[i].addr;
1873 desc_len = desc[i].len;
1874 max = desc_len / sizeof(struct vring_desc);
1875 read_len = desc_len;
1876 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
1877 if (unlikely(desc && read_len != desc_len)) {
1878 /* Failed to use zero copy */
1879 desc = NULL;
1880 if (!virtqueue_read_indirect_desc(dev, desc_buf,
1881 desc_addr,
1882 desc_len)) {
1883 desc = desc_buf;
1886 if (!desc) {
1887 vu_panic(dev, "Invalid indirect buffer table");
1888 goto err;
1890 num_bufs = i = 0;
1893 do {
1894 /* If we've got too many, that implies a descriptor loop. */
1895 if (++num_bufs > max) {
1896 vu_panic(dev, "Looped descriptor");
1897 goto err;
1900 if (desc[i].flags & VRING_DESC_F_WRITE) {
1901 in_total += desc[i].len;
1902 } else {
1903 out_total += desc[i].len;
1905 if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1906 goto done;
1908 rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
1909 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1911 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1912 goto err;
1915 if (!indirect) {
1916 total_bufs = num_bufs;
1917 } else {
1918 total_bufs++;
1921 if (rc < 0) {
1922 goto err;
1924 done:
1925 if (in_bytes) {
1926 *in_bytes = in_total;
1928 if (out_bytes) {
1929 *out_bytes = out_total;
1931 return;
1933 err:
1934 in_total = out_total = 0;
1935 goto done;
1938 bool
1939 vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
1940 unsigned int out_bytes)
1942 unsigned int in_total, out_total;
1944 vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
1945 in_bytes, out_bytes);
1947 return in_bytes <= in_total && out_bytes <= out_total;
1950 /* Fetch avail_idx from VQ memory only when we really need to know if
1951 * guest has added some buffers. */
1952 bool
1953 vu_queue_empty(VuDev *dev, VuVirtq *vq)
1955 if (unlikely(dev->broken) ||
1956 unlikely(!vq->vring.avail)) {
1957 return true;
1960 if (vq->shadow_avail_idx != vq->last_avail_idx) {
1961 return false;
1964 return vring_avail_idx(vq) == vq->last_avail_idx;
1967 static bool
1968 vring_notify(VuDev *dev, VuVirtq *vq)
1970 uint16_t old, new;
1971 bool v;
1973 /* We need to expose used array entries before checking used event. */
1974 smp_mb();
1976 /* Always notify when the queue is empty, if that feature was acknowledged */
1977 if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1978 !vq->inuse && vu_queue_empty(dev, vq)) {
1979 return true;
1982 if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1983 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1986 v = vq->signalled_used_valid;
1987 vq->signalled_used_valid = true;
1988 old = vq->signalled_used;
1989 new = vq->signalled_used = vq->used_idx;
1990 return !v || vring_need_event(vring_get_used_event(vq), new, old);
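/*
 * Note (added for clarity): with VIRTIO_RING_F_EVENT_IDX negotiated,
 * vring_need_event() (from the virtio ring headers) reports whether the
 * window (old, new] of newly published used entries crossed the guest's
 * used_event index, i.e. (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old);
 * only then is the call eventfd signalled.
 */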
1993 void
1994 vu_queue_notify(VuDev *dev, VuVirtq *vq)
1996 if (unlikely(dev->broken) ||
1997 unlikely(!vq->vring.avail)) {
1998 return;
2001 if (!vring_notify(dev, vq)) {
2002 DPRINT("skipped notify...\n");
2003 return;
2006 if (eventfd_write(vq->call_fd, 1) < 0) {
2007 vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
2011 static inline void
2012 vring_used_flags_set_bit(VuVirtq *vq, int mask)
2014 uint16_t *flags;
2016 flags = (uint16_t *)((char*)vq->vring.used +
2017 offsetof(struct vring_used, flags));
2018 *flags |= mask;
2021 static inline void
2022 vring_used_flags_unset_bit(VuVirtq *vq, int mask)
2024 uint16_t *flags;
2026 flags = (uint16_t *)((char*)vq->vring.used +
2027 offsetof(struct vring_used, flags));
2028 *flags &= ~mask;
2031 static inline void
2032 vring_set_avail_event(VuVirtq *vq, uint16_t val)
2034 if (!vq->notification) {
2035 return;
2038 *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
2041 void
2042 vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
2044 vq->notification = enable;
2045 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2046 vring_set_avail_event(vq, vring_avail_idx(vq));
2047 } else if (enable) {
2048 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
2049 } else {
2050 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
2052 if (enable) {
2053 /* Expose avail event/used flags before caller checks the avail idx. */
2054 smp_mb();
2058 static void
2059 virtqueue_map_desc(VuDev *dev,
2060 unsigned int *p_num_sg, struct iovec *iov,
2061 unsigned int max_num_sg, bool is_write,
2062 uint64_t pa, size_t sz)
2064 unsigned num_sg = *p_num_sg;
2066 assert(num_sg <= max_num_sg);
2068 if (!sz) {
2069 vu_panic(dev, "virtio: zero sized buffers are not allowed");
2070 return;
2073 while (sz) {
2074 uint64_t len = sz;
2076 if (num_sg == max_num_sg) {
2077 vu_panic(dev, "virtio: too many descriptors in indirect table");
2078 return;
2081 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
2082 if (iov[num_sg].iov_base == NULL) {
2083 vu_panic(dev, "virtio: invalid address for buffers");
2084 return;
2086 iov[num_sg].iov_len = len;
2087 num_sg++;
2088 sz -= len;
2089 pa += len;
2092 *p_num_sg = num_sg;
2095 static void *
2096 virtqueue_alloc_element(size_t sz,
2097 unsigned out_num, unsigned in_num)
2099 VuVirtqElement *elem;
2100 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
2101 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
2102 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
2104 assert(sz >= sizeof(VuVirtqElement));
2105 elem = malloc(out_sg_end);
2106 elem->out_num = out_num;
2107 elem->in_num = in_num;
2108 elem->in_sg = (void *)elem + in_sg_ofs;
2109 elem->out_sg = (void *)elem + out_sg_ofs;
2110 return elem;
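/*
 * Layout note (added for clarity): the single allocation holds the caller's
 * element structure of size 'sz' (at least a VuVirtqElement), followed by
 * in_num 'in' iovecs and then out_num 'out' iovecs:
 *
 *     [ caller struct, sz bytes | in_sg[in_num] | out_sg[out_num] ]
 *
 * so a device backend can embed VuVirtqElement at the start of its own
 * request struct and pass sizeof(that struct) to vu_queue_pop().
 */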
2113 static void *
2114 vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
2116 struct vring_desc *desc = vq->vring.desc;
2117 uint64_t desc_addr, read_len;
2118 unsigned int desc_len;
2119 unsigned int max = vq->vring.num;
2120 unsigned int i = idx;
2121 VuVirtqElement *elem;
2122 unsigned int out_num = 0, in_num = 0;
2123 struct iovec iov[VIRTQUEUE_MAX_SIZE];
2124 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2125 int rc;
2127 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2128 if (desc[i].len % sizeof(struct vring_desc)) {
2129 vu_panic(dev, "Invalid size for indirect buffer table");
2132 /* loop over the indirect descriptor table */
2133 desc_addr = desc[i].addr;
2134 desc_len = desc[i].len;
2135 max = desc_len / sizeof(struct vring_desc);
2136 read_len = desc_len;
2137 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2138 if (unlikely(desc && read_len != desc_len)) {
2139 /* Failed to use zero copy */
2140 desc = NULL;
2141 if (!virtqueue_read_indirect_desc(dev, desc_buf,
2142 desc_addr,
2143 desc_len)) {
2144 desc = desc_buf;
2147 if (!desc) {
2148 vu_panic(dev, "Invalid indirect buffer table");
2149 return NULL;
2151 i = 0;
2154 /* Collect all the descriptors */
2155 do {
2156 if (desc[i].flags & VRING_DESC_F_WRITE) {
2157 virtqueue_map_desc(dev, &in_num, iov + out_num,
2158 VIRTQUEUE_MAX_SIZE - out_num, true,
2159 desc[i].addr, desc[i].len);
2160 } else {
2161 if (in_num) {
2162 vu_panic(dev, "Incorrect order for descriptors");
2163 return NULL;
2165 virtqueue_map_desc(dev, &out_num, iov,
2166 VIRTQUEUE_MAX_SIZE, false,
2167 desc[i].addr, desc[i].len);
2170 /* If we've got too many, that implies a descriptor loop. */
2171 if ((in_num + out_num) > max) {
2172 vu_panic(dev, "Looped descriptor");
2174 rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
2175 } while (rc == VIRTQUEUE_READ_DESC_MORE);
2177 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2178 vu_panic(dev, "read descriptor error");
2179 return NULL;
2182 /* Now copy what we have collected and mapped */
2183 elem = virtqueue_alloc_element(sz, out_num, in_num);
2184 elem->index = idx;
2185 for (i = 0; i < out_num; i++) {
2186 elem->out_sg[i] = iov[i];
2188 for (i = 0; i < in_num; i++) {
2189 elem->in_sg[i] = iov[out_num + i];
2192 return elem;
2195 static int
2196 vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
2198 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2199 return 0;
2202 if (unlikely(!vq->inflight)) {
2203 return -1;
2206 vq->inflight->desc[desc_idx].counter = vq->counter++;
2207 vq->inflight->desc[desc_idx].inflight = 1;
2209 return 0;
2212 static int
2213 vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2215 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2216 return 0;
2219 if (unlikely(!vq->inflight)) {
2220 return -1;
2223 vq->inflight->last_batch_head = desc_idx;
2225 return 0;
2228 static int
2229 vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2231 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2232 return 0;
2235 if (unlikely(!vq->inflight)) {
2236 return -1;
2239 barrier();
2241 vq->inflight->desc[desc_idx].inflight = 0;
2243 barrier();
2245 vq->inflight->used_idx = vq->used_idx;
2247 return 0;
2250 void *
2251 vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
2253 int i;
2254 unsigned int head;
2255 VuVirtqElement *elem;
2257 if (unlikely(dev->broken) ||
2258 unlikely(!vq->vring.avail)) {
2259 return NULL;
2262 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
2263 i = (--vq->resubmit_num);
2264 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);
2266 if (!vq->resubmit_num) {
2267 free(vq->resubmit_list);
2268 vq->resubmit_list = NULL;
2271 return elem;
2274 if (vu_queue_empty(dev, vq)) {
2275 return NULL;
2277 /*
2278 * Needed after virtio_queue_empty(), see comment in
2279 * virtqueue_num_heads().
2280 */
2281 smp_rmb();
2283 if (vq->inuse >= vq->vring.num) {
2284 vu_panic(dev, "Virtqueue size exceeded");
2285 return NULL;
2288 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
2289 return NULL;
2292 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2293 vring_set_avail_event(vq, vq->last_avail_idx);
2296 elem = vu_queue_map_desc(dev, vq, head, sz);
2298 if (!elem) {
2299 return NULL;
2302 vq->inuse++;
2304 vu_queue_inflight_get(dev, vq, head);
2306 return elem;
2309 static void
2310 vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2311 size_t len)
2313 vq->inuse--;
2314 /* unmap, when DMA support is added */
2317 void
2318 vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2319 size_t len)
2321 vq->last_avail_idx--;
2322 vu_queue_detach_element(dev, vq, elem, len);
2325 bool
2326 vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
2328 if (num > vq->inuse) {
2329 return false;
2331 vq->last_avail_idx -= num;
2332 vq->inuse -= num;
2333 return true;
2336 static inline
2337 void vring_used_write(VuDev *dev, VuVirtq *vq,
2338 struct vring_used_elem *uelem, int i)
2340 struct vring_used *used = vq->vring.used;
2342 used->ring[i] = *uelem;
2343 vu_log_write(dev, vq->vring.log_guest_addr +
2344 offsetof(struct vring_used, ring[i]),
2345 sizeof(used->ring[i]));
2349 static void
2350 vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
2351 const VuVirtqElement *elem,
2352 unsigned int len)
2354 struct vring_desc *desc = vq->vring.desc;
2355 unsigned int i, max, min, desc_len;
2356 uint64_t desc_addr, read_len;
2357 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2358 unsigned num_bufs = 0;
2360 max = vq->vring.num;
2361 i = elem->index;
2363 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2364 if (desc[i].len % sizeof(struct vring_desc)) {
2365 vu_panic(dev, "Invalid size for indirect buffer table");
2368 /* loop over the indirect descriptor table */
2369 desc_addr = desc[i].addr;
2370 desc_len = desc[i].len;
2371 max = desc_len / sizeof(struct vring_desc);
2372 read_len = desc_len;
2373 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2374 if (unlikely(desc && read_len != desc_len)) {
2375 /* Failed to use zero copy */
2376 desc = NULL;
2377 if (!virtqueue_read_indirect_desc(dev, desc_buf,
2378 desc_addr,
2379 desc_len)) {
2380 desc = desc_buf;
2383 if (!desc) {
2384 vu_panic(dev, "Invalid indirect buffer table");
2385 return;
2387 i = 0;
2390 do {
2391 if (++num_bufs > max) {
2392 vu_panic(dev, "Looped descriptor");
2393 return;
2396 if (desc[i].flags & VRING_DESC_F_WRITE) {
2397 min = MIN(desc[i].len, len);
2398 vu_log_write(dev, desc[i].addr, min);
2399 len -= min;
2402 } while (len > 0 &&
2403 (virtqueue_read_next_desc(dev, desc, i, max, &i)
2404 == VIRTQUEUE_READ_DESC_MORE));
2407 void
2408 vu_queue_fill(VuDev *dev, VuVirtq *vq,
2409 const VuVirtqElement *elem,
2410 unsigned int len, unsigned int idx)
2412 struct vring_used_elem uelem;
2414 if (unlikely(dev->broken) ||
2415 unlikely(!vq->vring.avail)) {
2416 return;
2419 vu_log_queue_fill(dev, vq, elem, len);
2421 idx = (idx + vq->used_idx) % vq->vring.num;
2423 uelem.id = elem->index;
2424 uelem.len = len;
2425 vring_used_write(dev, vq, &uelem, idx);
2428 static inline
2429 void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
2431 vq->vring.used->idx = val;
2432 vu_log_write(dev,
2433 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
2434 sizeof(vq->vring.used->idx));
2436 vq->used_idx = val;
2439 void
2440 vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
2442 uint16_t old, new;
2444 if (unlikely(dev->broken) ||
2445 unlikely(!vq->vring.avail)) {
2446 return;
2449 /* Make sure buffer is written before we update index. */
2450 smp_wmb();
2452 old = vq->used_idx;
2453 new = old + count;
2454 vring_used_idx_set(dev, vq, new);
2455 vq->inuse -= count;
2456 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
2457 vq->signalled_used_valid = false;
2461 void
2462 vu_queue_push(VuDev *dev, VuVirtq *vq,
2463 const VuVirtqElement *elem, unsigned int len)
2465 vu_queue_fill(dev, vq, elem, len, 0);
2466 vu_queue_inflight_pre_put(dev, vq, elem->index);
2467 vu_queue_flush(dev, vq, 1);
2468 vu_queue_inflight_post_put(dev, vq, elem->index);
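/*
 * Usage sketch (editor's illustration; handle_request() is a hypothetical
 * application function): a typical queue handler registered with
 * vu_set_queue_handler() drains the ring like this:
 *
 *     static void my_handler(VuDev *dev, int qidx)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             handle_request(elem->out_sg, elem->out_num,
 *                            elem->in_sg, elem->in_num);
 *             vu_queue_push(dev, vq, elem, 0);
 *             free(elem);
 *         }
 *         vu_queue_notify(dev, vq);
 *     }
 *
 * vu_queue_push() fills the used ring and flushes one entry at a time; the
 * final vu_queue_notify() signals the guest once, if it asked to be notified.
 */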