/*
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */
/* this code avoids GLib dependency */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>

#include "qemu/compiler.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "qemu/atomic.h"
#include "qemu/osdep.h"
#include "qemu/memfd.h"

#include "libvhost-user.h"
/* usually provided by GLib */
#define MIN(x, y) ({                            \
            typeof(x) _min1 = (x);              \
            typeof(y) _min2 = (y);              \
            (void) (&_min1 == &_min2);          \
            _min1 < _min2 ? _min1 : _min2; })
/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))

/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)
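/*
 * Worked example for the helpers above: ALIGN_DOWN(17, 8) == 16 and
 * ALIGN_UP(17, 8) == 24. vu_inflight_queue_size() below relies on
 * ALIGN_UP to pad each queue's slice of the inflight buffer out to an
 * INFLIGHT_ALIGNMENT (64-byte) boundary.
 */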
static bool
has_feature(uint64_t features, unsigned int fbit)
{
    return !!(features & (1ULL << fbit));
}
bool
vu_has_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}
static inline bool
vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}
static const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_MAX),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}
static void
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = strdup("vu_panic: out of memory");
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /* FIXME: find a way to call virtio_error? */
}
/* Translate guest physical address to our virtual address.  */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    int i;

    if (*plen == 0) {
        return NULL;
    }

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
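/*
 * Illustrative use (not from this file): translate a guest buffer and
 * detect a mapping that is truncated at a region boundary.
 *
 *     uint64_t len = sz;
 *     void *p = vu_gpa_to_va(dev, &len, gpa);
 *     if (!p || len != sz) {
 *         // gpa not mapped, or only the first 'len' bytes are contiguous
 *     }
 */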
/* Translate qemu virtual address to our virtual address.  */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}
/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}
/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
    defined(UFFD_FEATURE_MISSING_SHMEM) &&\
    defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;
#else
    return false;
#endif
}
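/*
 * Note on the reader that follows: the fixed-size message header and any
 * SCM_RIGHTS file descriptors arrive together in a single recvmsg(); the
 * payload announced by vmsg->size is then pulled separately with plain
 * read() on the same socket.
 */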
static bool
vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert(rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}
static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}
static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return true;
    }

    if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
        return false;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        return false;
    }

    return msg_reply.payload.u64 == 0;
}
/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}
static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    atomic_or(&log_table[page / 8], 1 << (page % 8));
}
static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}
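/*
 * The dirty log is a bitmap with one bit per VHOST_LOG_PAGE bytes of guest
 * memory. Worked example: with 4 KiB log pages, a one-byte write at guest
 * address 0x1800 dirties page 0x1800 / 0x1000 == 1, so vu_log_page() ORs
 * bit 1 into byte 0 of the table.
 */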
static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}
static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}
static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    int i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}
static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;

    /*
     * VHOST_USER_F_PROTOCOL_FEATURES is a bit number, so test the bit
     * rather than AND-ing the raw constant against the feature word.
     */
    if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}
static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}
static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}
static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}
static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        }
        dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
        DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
               dev_region->mmap_addr);

        /* Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);
        close(vmsg->fds[i]);
    }

    /* Send the message back to qemu with the addresses filled in */
    vmsg->fd_num = 0;
    if (!vu_send_reply(dev, dev->sock, vmsg)) {
        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
        return false;
    }

    /* Wait for QEMU to confirm that it's registered the handler for the
     * faults.
     */
    if (!vu_message_read(dev, dev->sock, vmsg) ||
        vmsg->size != sizeof(vmsg->payload.u64) ||
        vmsg->payload.u64 != 0) {
        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
        return false;
    }

    /* OK, now we can go and register the memory and generate faults */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
#ifdef UFFDIO_REGISTER
        /* We should already have an open ufd. Mark each memory
         * range as ufd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /* Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /* Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        struct uffdio_register reg_struct;
        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return false;
}
static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;

        if (m) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = memory->nregions;

    if (dev->postcopy_listening) {
        return vu_set_mem_table_exec_postcopy(dev, vmsg);
    }

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        }
        dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
        DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
               dev_region->mmap_addr);

        close(vmsg->fds[i]);
    }

    return false;
}
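/*
 * After this handler runs, a guest physical address gpa inside region r is
 * reachable at (r->mmap_addr + r->mmap_offset) + (gpa - r->gpa), which is
 * exactly the arithmetic vu_gpa_to_va() performs.
 */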
static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);

    return true;
}
static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].vring.num = num;

    return false;
}
static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);

    vq->vring.flags = vra->flags;
    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
    vq->vring.log_guest_addr = vra->log_guest_addr;

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = vq->vring.used->idx;

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        if (resume) {
            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
        }
    }

    return false;
}
static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}
static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %d\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}
static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
        vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}
static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}
static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = vq->vring.used->idx;
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;
    vq->counter = 0;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;

        barrier();

        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = malloc(sizeof(VuVirtqInflightDesc) * vq->inuse);
        if (!vq->resubmit_list) {
            return -1;
        }

        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}
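/*
 * Resubmit order matters here: inflight_desc_compare() sorts entries by the
 * per-queue submission counter (tolerating wraparound within
 * VIRTQUEUE_MAX_SIZE * 2), so requests interrupted by a crash are replayed
 * oldest-first after reconnect.
 */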
static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
        return false;
    }

    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
        return false;
    }

    return vu_process_message_reply(dev, &vmsg);
}
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vmsg->fds[0], 1)) {
        return -1;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);

    return false;
}
static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = vmsg->fds[0];

    return false;
}
static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}
static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}
static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, dev->max_queues);
    return true;
}
static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %d\n", index);
    DPRINT("State.enable: %d\n", enable);

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}
static bool
vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
        return false;
    }

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
    }
    dev->slave_fd = vmsg->fds[0];
    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to master */
        vmsg->size = 0;
    }

    return true;
}
static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}
static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
    dev->postcopy_ufd = -1;
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    vmsg->size = 0;
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}
static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        vmsg_set_reply_u64(vmsg, -1);
        return true;
    }
    dev->postcopy_listening = true;

    vmsg_set_reply_u64(vmsg, 0);
    return true;
}
static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg_set_reply_u64(vmsg, 0);
    DPRINT("%s: exit\n", __func__);
    return true;
}
static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}
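/*
 * Worked example (assuming the 16-byte VuDescStateSplit layout from
 * libvhost-user.h): queue_size = 128 gives 128 * 16 + 2 = 2050 bytes,
 * which ALIGN_UP rounds to 2112 so each queue's slice starts on a 64-byte
 * (INFLIGHT_ALIGNMENT) boundary.
 */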
static bool
vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    void *addr;
    uint64_t mmap_size;
    uint16_t num_queues, queue_size;

    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;

    addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
                            F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                            &fd, NULL);

    if (!addr) {
        vu_panic(dev, "Failed to alloc vhost inflight area");
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    memset(addr, 0, mmap_size);

    dev->inflight_info.addr = addr;
    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
    dev->inflight_info.fd = vmsg->fds[0] = fd;
    vmsg->fd_num = 1;
    vmsg->payload.inflight.mmap_offset = 0;

    DPRINT("send inflight mmap_size: %"PRId64"\n",
           vmsg->payload.inflight.mmap_size);
    DPRINT("send inflight mmap offset: %"PRId64"\n",
           vmsg->payload.inflight.mmap_offset);

    return true;
}
static bool
vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd, i;
    uint64_t mmap_size, mmap_offset;
    uint16_t num_queues, queue_size;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
                 vmsg->size, vmsg->fd_num);
        return false;
    }

    fd = vmsg->fds[0];
    mmap_size = vmsg->payload.inflight.mmap_size;
    mmap_offset = vmsg->payload.inflight.mmap_offset;
    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
              fd, mmap_offset);

    if (rc == MAP_FAILED) {
        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
        return false;
    }

    if (dev->inflight_info.fd) {
        close(dev->inflight_info.fd);
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
    }

    dev->inflight_info.fd = fd;
    dev->inflight_info.addr = rc;
    dev->inflight_info.size = mmap_size;

    for (i = 0; i < num_queues; i++) {
        dev->vq[i].inflight = (VuVirtqInflight *)rc;
        dev->vq[i].inflight->desc_num = queue_size;
        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
    }

    return false;
}
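/*
 * Round trip: GET_INFLIGHT_FD hands QEMU a zeroed memfd; after a device
 * crash and reconnect, QEMU returns the same buffer via SET_INFLIGHT_FD.
 * The per-queue carving above then lets vu_check_queue_inflights() find
 * and resubmit whatever was still marked inflight.
 */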
static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %d\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_SLAVE_REQ_FD:
        return vu_set_slave_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        /* if you need processing before exit, override iface->process_msg */
        exit(0);
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    case VHOST_USER_GET_INFLIGHT_FD:
        return vu_get_inflight_fd(dev, vmsg);
    case VHOST_USER_SET_INFLIGHT_FD:
        return vu_set_inflight_fd(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}
bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool success = false;

    if (!vu_message_read(dev, dev->sock, &vmsg)) {
        goto end;
    }

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}
void
vu_deinit(VuDev *dev)
{
    int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;
        if (m != MAP_FAILED) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = 0;

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
        dev->slave_fd = -1;
    }

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
}
bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(remove_watch);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    dev->slave_fd = -1;
    dev->max_queues = max_queues;

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}
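/*
 * Typical setup sketch (illustrative; my_panic/my_set_watch/my_remove_watch
 * and my_iface are hypothetical caller-provided names):
 *
 *     VuDev dev;
 *     if (!vu_init(&dev, 1, conn_fd, my_panic, my_set_watch,
 *                  my_remove_watch, &my_iface)) {
 *         // virtqueue allocation failed
 *     }
 *     // then call vu_dispatch(&dev) whenever conn_fd becomes readable
 */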
VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}
bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return vq->vring.avail->flags;
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = vq->vring.avail->idx;

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return vq->vring.avail->ring[i];
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}
static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    if (len == 0) {
        return -1;
    }

    /* Copy the table chunk by chunk, since it may span memory regions. */
    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        desc += read_len / sizeof(struct vring_desc);
    }

    return 0;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};
static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc[i].next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
            if (desc[i].len % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = desc[i].addr;
            desc_len = desc[i].len;
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (desc[i].flags & VRING_DESC_F_WRITE) {
                in_total += desc[i].len;
            } else {
                out_total += desc[i].len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
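/*
 * With VIRTIO_RING_F_EVENT_IDX the guest publishes a "used event" index in
 * the trailing slot of the avail ring (vring_get_used_event() above); a
 * notification fires only when the new used_idx crosses that value, which
 * lets the guest batch interrupts.
 */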
void
vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}
static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags |= mask;
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags &= ~mask;
}
static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    if (!vq->notification) {
        return;
    }

    *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
}
void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
static void
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
}
static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
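/*
 * Layout of that single allocation: [element struct (sz bytes), padded to
 * iovec alignment][in_num iovecs][out_num iovecs]. Keeping everything in
 * one block means a single free() releases the element together with both
 * scatter-gather arrays.
 */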
static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
        }

        /* loop over the indirect descriptor table */
        desc_addr = desc[i].addr;
        desc_len = desc[i].len;
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (desc[i].flags & VRING_DESC_F_WRITE) {
            virtqueue_map_desc(dev, &in_num, iov + out_num,
                               VIRTQUEUE_MAX_SIZE - out_num, true,
                               desc[i].addr, desc[i].len);
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            virtqueue_map_desc(dev, &out_num, iov,
                               VIRTQUEUE_MAX_SIZE, false,
                               desc[i].addr, desc[i].len);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}
static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}
static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}
static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    barrier();

    vq->inflight->desc[desc_idx].inflight = 0;

    barrier();

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}
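/*
 * Crash-safety protocol: pre_put records the batch head *before* the used
 * ring is updated and post_put clears the inflight flag *after* it. The
 * used_idx mismatch check in vu_check_queue_inflights() can then tell on
 * which side of the used-ring update a crash landed.
 */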
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}
void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}
void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}
bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
static
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}
static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
        }

        /* loop over the indirect descriptor table */
        desc_addr = desc[i].addr;
        desc_len = desc[i].len;
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (desc[i].flags & VRING_DESC_F_WRITE) {
            min = MIN(desc[i].len, len);
            vu_log_write(dev, desc[i].addr, min);
            len -= min;
        }

    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}
void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(dev, vq, &uelem, idx);
}
static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = val;
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}
void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}
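/*
 * Illustrative request loop for a device's kick handler (process() and
 * written_len are hypothetical caller-side names):
 *
 *     VuVirtqElement *elem;
 *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *         process(elem->out_sg, elem->out_num,   // driver -> device data
 *                 elem->in_sg, elem->in_num);    // device -> driver space
 *         vu_queue_push(dev, vq, elem, written_len);
 *         free(elem);
 *     }
 *     vu_queue_notify(dev, vq);
 */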