 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/* this code avoids GLib dependency */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>

#include "qemu/compiler.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "qemu/atomic.h"
#include "qemu/osdep.h"
#include "qemu/bswap.h"
#include "qemu/memfd.h"

#include "libvhost-user.h"
/* usually provided by GLib */
#define MIN(x, y) ({                            \
        typeof(x) _min1 = (x);                  \
        typeof(y) _min2 = (y);                  \
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })
/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
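/*
 * Illustrative examples (not in the original source):
 * ALIGN_DOWN(10, 8) == 8 and ALIGN_UP(10, 8) == ALIGN_DOWN(17, 8) == 16.
 * Because this uses integer division rather than bit masking, it works for
 * any positive multiple m, not just powers of two.
 */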
/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0
#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)
bool has_feature(uint64_t features, unsigned int fbit)
{
    return !!(features & (1ULL << fbit));
}

bool vu_has_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}

static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}
static const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_VRING_KICK),
        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
        REQ(VHOST_USER_ADD_MEM_REG),
        REQ(VHOST_USER_REM_MEM_REG),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}
static void
vu_panic(VuDev *dev, const char *msg, ...)
{
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = strdup("vu_panic: internal error");
    }

    dev->panic(dev, buf);

    /*
     * FIXME:
     * find a way to call virtio_error, or perhaps close the connection?
     */
}
/* Translate guest physical address to our virtual address.  */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
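/*
 * Illustrative example (not in the original source): for a region with
 * gpa 0x40000000, size 0x10000000, mmap_addr 0x7f0000000000 and
 * mmap_offset 0, guest physical address 0x40001000 translates to host
 * virtual address 0x7f0000001000, and *plen is clipped so the caller never
 * reads past the end of the region.
 */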
/* Translate qemu virtual address to our virtual address.  */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}
/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}
/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
    defined(UFFD_FEATURE_MISSING_SHMEM) &&\
    defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;
#else
    return false;
#endif
}
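/*
 * Added note (not in the original source): UFFDIO_API is a feature
 * handshake, so probing with UFFD_FEATURE_MISSING_SHMEM |
 * UFFD_FEATURE_MISSING_HUGETLBFS tells us whether the running kernel can
 * deliver missing-page faults for shmem and hugetlbfs backed guest memory,
 * which postcopy support relies on.
 */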
static bool
vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert(rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);
    return false;
}
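/*
 * Added note (not in the original source): any file descriptors QEMU
 * attaches to a message (memory region fds, kick/call eventfds, ...) arrive
 * as SCM_RIGHTS ancillary data on the same recvmsg() call, which is why the
 * control buffer above is sized for VHOST_MEMORY_BASELINE_NREGIONS ints.
 */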
static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}
static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}
/*
 * Processes a reply on the slave channel.
 * Entered with slave_mutex held and releases it before exit.
 * Returns true on success.
 */
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;
    bool result = false;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        result = true;
        goto out;
    }

    if (!vu_message_read_default(dev, dev->slave_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        goto out;
    }

    result = msg_reply.payload.u64 == 0;

out:
    pthread_mutex_unlock(&dev->slave_mutex);
    return result;
}
/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}

static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    qatomic_or(&log_table[page / 8], 1 << (page % 8));
}
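/*
 * Illustrative note (not in the original source): the dirty log is a plain
 * bitmap with one bit per VHOST_LOG_PAGE-sized page, so page N lives in
 * byte N / 8, bit N % 8.  For example, page 11 sets bit 3 of log_table[1].
 */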
static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}
static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}
static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        /*
         * The following VIRTIO feature bits are supported by our virtqueue
         * implementation:
         */
        1ULL << VIRTIO_F_NOTIFY_ON_EMPTY |
        1ULL << VIRTIO_RING_F_INDIRECT_DESC |
        1ULL << VIRTIO_RING_F_EVENT_IDX |
        1ULL << VIRTIO_F_VERSION_1 |

        /* vhost-user feature bits */
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}
static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}
static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;
    if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
        /*
         * We only support devices conforming to VIRTIO 1.0 or
         * later.
         */
        vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
        return false;
    }

    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}
static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}
static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}
static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}
static bool
map_ring(VuDev *dev, VuVirtq *vq)
{
    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
}
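/*
 * Note (added for clarity, not in the original source): map_ring() returns
 * true on *failure*, i.e. when any of the three ring addresses could not be
 * translated, which is why callers treat a non-zero result as an error.
 */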
static bool
generate_faults(VuDev *dev) {
    int i;
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
#ifdef UFFDIO_REGISTER
        /*
         * We should already have an open ufd. Mark each memory
         * range as userfaulted.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /*
         * Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /*
             * Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        struct uffdio_register reg_struct;
        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return true;
}
static bool
vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    int i;
    bool track_ramblocks = dev->postcopy_listening;
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    VuDevRegion *dev_region = &dev->regions[dev->nregions];
    void *mmap_addr;

    /*
     * If we are in postcopy mode and we receive a u64 payload with a 0 value
     * we know all the postcopy client bases have been received, and we
     * should start generating faults.
     */
    if (track_ramblocks &&
        vmsg->size == sizeof(vmsg->payload.u64) &&
        vmsg->payload.u64 == 0) {
        (void)generate_faults(dev);
        return false;
    }

    DPRINT("Adding region: %d\n", dev->nregions);
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    dev_region->gpa = msg_region->guest_phys_addr;
    dev_region->size = msg_region->memory_size;
    dev_region->qva = msg_region->userspace_addr;
    dev_region->mmap_offset = msg_region->mmap_offset;

    /*
     * We don't use offset argument of mmap() since the
     * mapped address has to be page aligned, and we use huge
     * pages.
     */
    if (track_ramblocks) {
        /*
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED,
                         vmsg->fds[0], 0);
    } else {
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED, vmsg->fds[0],
                         0);
    }

    if (mmap_addr == MAP_FAILED) {
        vu_panic(dev, "region mmap error: %s", strerror(errno));
    } else {
        dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
        DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
               dev_region->mmap_addr);
    }

    if (track_ramblocks) {
        /*
         * Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);

        /* Send the message back to qemu with the addresses filled in. */
        if (!vu_send_reply(dev, dev->sock, vmsg)) {
            vu_panic(dev, "failed to respond to add-mem-region for postcopy");
            return false;
        }

        DPRINT("Successfully added new region in postcopy\n");
        dev->nregions++;
        return false;
    } else {
        for (i = 0; i < dev->max_queues; i++) {
            if (dev->vq[i].vring.desc) {
                if (map_ring(dev, &dev->vq[i])) {
                    vu_panic(dev, "remapping queue %d for new memory region",
                             i);
                }
            }
        }

        DPRINT("Successfully added new region\n");
        dev->nregions++;
        vmsg_set_reply_u64(vmsg, 0);
        return true;
    }
}
static inline bool reg_equal(VuDevRegion *vudev_reg,
                             VhostUserMemoryRegion *msg_reg)
{
    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
        vudev_reg->qva == msg_reg->userspace_addr &&
        vudev_reg->size == msg_reg->memory_size) {
        return true;
    }

    return false;
}
static bool
vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    int i, j;
    bool found = false;
    VuDevRegion shadow_regions[VHOST_USER_MAX_RAM_SLOTS] = {};
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;

    DPRINT("Removing region:\n");
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    for (i = 0, j = 0; i < dev->nregions; i++) {
        if (!reg_equal(&dev->regions[i], msg_region)) {
            shadow_regions[j].gpa = dev->regions[i].gpa;
            shadow_regions[j].size = dev->regions[i].size;
            shadow_regions[j].qva = dev->regions[i].qva;
            shadow_regions[j].mmap_offset = dev->regions[i].mmap_offset;
            j++;
        } else {
            found = true;
            VuDevRegion *r = &dev->regions[i];
            void *m = (void *) (uintptr_t) r->mmap_addr;

            if (m) {
                munmap(m, r->size + r->mmap_offset);
            }
        }
    }

    if (found) {
        memcpy(dev->regions, shadow_regions,
               sizeof(VuDevRegion) * VHOST_USER_MAX_RAM_SLOTS);
        DPRINT("Successfully removed a region\n");
        dev->nregions--;
        vmsg_set_reply_u64(vmsg, 0);
        return true;
    } else {
        vu_panic(dev, "Specified region not found\n");
    }

    return false;
}
static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        /* Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);
    }

    /* Send the message back to qemu with the addresses filled in */
    if (!vu_send_reply(dev, dev->sock, vmsg)) {
        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
        return false;
    }

    /* Wait for QEMU to confirm that it's registered the handler for the
     * faults.
     */
    if (!dev->read_msg(dev, dev->sock, vmsg) ||
        vmsg->size != sizeof(vmsg->payload.u64) ||
        vmsg->payload.u64 != 0) {
        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
        return false;
    }

    /* OK, now we can go and register the memory and generate faults */
    (void)generate_faults(dev);

    return false;
}
static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;

        if (m) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = memory->nregions;

    if (dev->postcopy_listening) {
        return vu_set_mem_table_exec_postcopy(dev, vmsg);
    }

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }
    }

    for (i = 0; i < dev->max_queues; i++) {
        if (dev->vq[i].vring.desc) {
            if (map_ring(dev, &dev->vq[i])) {
                vu_panic(dev, "remapping queue %d during setmemtable", i);
            }
        }
    }

    return false;
}
static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);

    return true;
}
static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].vring.num = num;

    return false;
}
static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);

    vq->vra = *vra;
    vq->vring.flags = vra->flags;
    vq->vring.log_guest_addr = vra->log_guest_addr;

    if (map_ring(dev, vq)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = lduw_le_p(&vq->vring.used->idx);

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
    }

    return false;
}
static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}
static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %d\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}
static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (nofd) {
        vmsg_close_fds(vmsg);
        return true;
    }

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}
static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}
static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = lduw_le_p(&vq->vring.used->idx);
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;
        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
        if (!vq->resubmit_list) {
            return -1;
        }

        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}
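/*
 * Added note (not in the original source): after a backend reconnect, the
 * shared inflight buffer records which descriptors the previous instance
 * had popped but not yet completed.  They are collected into resubmit_list,
 * sorted by submission counter, and re-popped before any new requests so
 * in-order devices keep their ordering guarantees.
 */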
static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
        return false;
    }

    pthread_mutex_lock(&dev->slave_mutex);
    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
        pthread_mutex_unlock(&dev->slave_mutex);
        return false;
    }

    /* Also unlocks the slave_mutex */
    return vu_process_message_reply(dev, &vmsg);
}
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
        return -1;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);

    return false;
}
static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];

    return false;
}
static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Note that we support, but intentionally do not set,
     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
     * a device implementation can return it in its callback
     * (get_protocol_features) if it wants to use this for
     * simulation, but it is otherwise not desirable (if even
     * implemented by the master.)
     */
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD |
                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
                        1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}
static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ) ||
         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        /*
         * The use case for using messages for kick/call is simulation, to
         * make the kick and call synchronous. To actually get that
         * behaviour, both of the other features are required.
         * Theoretically, one could use only kick messages, or do them
         * without having F_REPLY_ACK, but too many (possibly pending)
         * messages on the socket will eventually cause the master to hang.
         * To avoid this in scenarios where it is not desired, enforce that
         * the settings actually enable the simulation case.
         */
        vu_panic(dev,
                 "F_IN_BAND_NOTIFICATIONS requires F_SLAVE_REQ && F_REPLY_ACK");
        return false;
    }

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}
static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, dev->max_queues);
    return true;
}
static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %d\n", index);
    DPRINT("State.enable: %d\n", enable);

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}
static bool
vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
        return false;
    }

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
    }
    dev->slave_fd = vmsg->fds[0];
    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to master */
        vmsg->size = 0;
    }

    return true;
}
static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}
static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
    dev->postcopy_ufd = -1;
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }

    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}
static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        vmsg_set_reply_u64(vmsg, -1);
        return true;
    }
    dev->postcopy_listening = true;

    vmsg_set_reply_u64(vmsg, 0);
    return true;
}
static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg_set_reply_u64(vmsg, 0);
    DPRINT("%s: exit\n", __func__);
    return true;
}
static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}
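/*
 * Worked example (added for clarity, not in the original source): with a
 * hypothetical sizeof(VuDescStateSplit) of 16 bytes and queue_size 128,
 * the per-queue inflight area would be ALIGN_UP(16 * 128 + 2, 64) = 2112
 * bytes, i.e. the descriptor state array plus trailing fields rounded up to
 * the 64-byte INFLIGHT_ALIGNMENT.
 */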
static bool
vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    void *addr;
    uint64_t mmap_size;
    uint16_t num_queues, queue_size;

    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;

    addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
                            F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                            &fd, NULL);

    if (!addr) {
        vu_panic(dev, "Failed to alloc vhost inflight area");
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    memset(addr, 0, mmap_size);

    dev->inflight_info.addr = addr;
    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
    dev->inflight_info.fd = vmsg->fds[0] = fd;
    vmsg->fd_num = 1;
    vmsg->payload.inflight.mmap_offset = 0;

    DPRINT("send inflight mmap_size: %"PRId64"\n",
           vmsg->payload.inflight.mmap_size);
    DPRINT("send inflight mmap offset: %"PRId64"\n",
           vmsg->payload.inflight.mmap_offset);

    return true;
}
static bool
vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd, i;
    uint64_t mmap_size, mmap_offset;
    uint16_t num_queues, queue_size;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
                 vmsg->size, vmsg->fd_num);
        return false;
    }

    fd = vmsg->fds[0];
    mmap_size = vmsg->payload.inflight.mmap_size;
    mmap_offset = vmsg->payload.inflight.mmap_offset;
    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
              fd, mmap_offset);

    if (rc == MAP_FAILED) {
        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
        return false;
    }

    if (dev->inflight_info.fd) {
        close(dev->inflight_info.fd);
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
    }

    dev->inflight_info.fd = fd;
    dev->inflight_info.addr = rc;
    dev->inflight_info.size = mmap_size;

    for (i = 0; i < num_queues; i++) {
        dev->vq[i].inflight = (VuVirtqInflight *)rc;
        dev->vq[i].inflight->desc_num = queue_size;
        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
    }

    return false;
}
static bool
vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    DPRINT("Got kick message: handler:%p idx:%d\n",
           dev->vq[index].handler, index);

    if (!dev->vq[index].started) {
        dev->vq[index].started = true;

        if (dev->iface->queue_set_started) {
            dev->iface->queue_set_started(dev, index, true);
        }
    }

    if (dev->vq[index].handler) {
        dev->vq[index].handler(dev, index);
    }

    return false;
}
static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
    vmsg->size  = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = VHOST_USER_MAX_RAM_SLOTS;

    if (!vu_message_write(dev, dev->sock, vmsg)) {
        vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno));
    }

    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);

    return false;
}
static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %d\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_SLAVE_REQ_FD:
        return vu_set_slave_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        /* if you need processing before exit, override iface->process_msg */
        exit(0);
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    case VHOST_USER_GET_INFLIGHT_FD:
        return vu_get_inflight_fd(dev, vmsg);
    case VHOST_USER_SET_INFLIGHT_FD:
        return vu_set_inflight_fd(dev, vmsg);
    case VHOST_USER_VRING_KICK:
        return vu_handle_vring_kick(dev, vmsg);
    case VHOST_USER_GET_MAX_MEM_SLOTS:
        return vu_handle_get_max_memslots(dev, vmsg);
    case VHOST_USER_ADD_MEM_REG:
        return vu_add_mem_reg(dev, vmsg);
    case VHOST_USER_REM_MEM_REG:
        return vu_rem_mem_reg(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}
static bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool need_reply, success = false;

    if (!dev->read_msg(dev, dev->sock, &vmsg)) {
        goto end;
    }

    need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested && need_reply) {
        vmsg_set_reply_u64(&vmsg, 0);
        reply_requested = 1;
    }

    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    return success;
}
void
vu_deinit(VuDev *dev)
{
    int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;
        if (m != MAP_FAILED) {
            munmap(m, r->size + r->mmap_offset);
        }
    }

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            dev->remove_watch(dev, vq->kick_fd);
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
        dev->slave_fd = -1;
    }
    pthread_mutex_destroy(&dev->slave_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }
}
bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_read_msg_cb read_msg,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(remove_watch);

    memset(dev, 0, sizeof(*dev));

    dev->read_msg = read_msg ? read_msg : vu_message_read_default;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->slave_mutex, NULL);
    dev->max_queues = max_queues;

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}
VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return lduw_le_p(&vq->vring.avail->flags);
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = lduw_le_p(&vq->vring.avail->idx);

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return lduw_le_p(&vq->vring.avail->ring[i]);
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}
static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        desc += read_len / sizeof(struct vring_desc);
    }

    return 0;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(lduw_le_p(&desc[i].flags) & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = lduw_le_p(&desc[i].next);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) {
            if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = ldq_le_p(&desc[i].addr);
            desc_len = ldl_le_p(&desc[i].len);
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) {
                in_total += ldl_le_p(&desc[i].len);
            } else {
                out_total += ldl_le_p(&desc[i].len);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
    if (rc < 0) {
        goto err;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (vq->call_fd < 0 &&
        vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_SLAVE_VRING_CALL,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };
        bool ack = sync &&
                   vu_has_protocol_feature(dev,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);

        if (ack) {
            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
        }

        vu_message_write(dev, dev->slave_fd, &vmsg);
        if (ack) {
            vu_message_read_default(dev, dev->slave_fd, &vmsg);
        }
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}

void vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, false);
}

void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, true);
}
static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    stw_le_p(flags, lduw_le_p(flags) | mask);
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    stw_le_p(flags, lduw_le_p(flags) & ~mask);
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    if (!vq->notification) {
        return;
    }

    stw_le_p(&vq->vring.used->ring[vq->vring.num], val);
}
void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
static bool
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return false;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return false;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return false;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
    return true;
}
static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        desc_addr = ldq_le_p(&desc[i].addr);
        desc_len = ldl_le_p(&desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) {
            if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
                                    VIRTQUEUE_MAX_SIZE - out_num, true,
                                    ldq_le_p(&desc[i].addr),
                                    ldl_le_p(&desc[i].len))) {
                return NULL;
            }
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            if (!virtqueue_map_desc(dev, &out_num, iov,
                                    VIRTQUEUE_MAX_SIZE, false,
                                    ldq_le_p(&desc[i].addr),
                                    ldl_le_p(&desc[i].len))) {
                return NULL;
            }
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            return NULL;
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}
static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}

static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}

static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].inflight = 0;

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}
void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}

void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}

bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}
static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return;
        }

        /* loop over the indirect descriptor table */
        desc_addr = ldq_le_p(&desc[i].addr);
        desc_len = ldl_le_p(&desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) {
            min = MIN(ldl_le_p(&desc[i].len), len);
            vu_log_write(dev, ldq_le_p(&desc[i].addr), min);
            len -= min;
        }
    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}
void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    stl_le_p(&uelem.id, elem->index);
    stl_le_p(&uelem.len, len);
    vring_used_write(dev, vq, &uelem, idx);
}
static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    stw_le_p(&vq->vring.used->idx, val);
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}
void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}