/*
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "exec/ramblock.h"

#include <sys/ioctl.h>
#include <sys/socket.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif
#define VHOST_MEMORY_BASELINE_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES    30
#define VHOST_USER_SLAVE_MAX_FDS          8
/*
 * Set maximum number of RAM slots supported to
 * the maximum number supported by the target.
 */
#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
    defined(TARGET_ARM) || defined(TARGET_ARM_64)
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS

#elif defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif
/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_STATUS = 16,
    VHOST_USER_PROTOCOL_F_MAX
};
#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
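/*
 * Note: only feature bits below VHOST_USER_PROTOCOL_F_MAX are meaningful to
 * this code; anything the backend offers beyond the mask is dropped during
 * negotiation (see vhost_user_backend_init below).
 */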
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_SET_STATUS = 39,
    VHOST_USER_GET_STATUS = 40,
    VHOST_USER_MAX
} VhostUserRequest;
typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;
typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;
#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64
typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;
static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))
typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;
typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;
typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;
typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserMemRegMsg mem_reg;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
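/*
 * On the wire, every vhost-user message is the packed VhostUserHeader
 * followed by hdr.size bytes of payload. File descriptors, when a request
 * carries any, travel out-of-band as SCM_RIGHTS ancillary data on the same
 * UNIX domain socket (see qemu_chr_fe_set_msgfds in vhost_user_write).
 */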
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *slave_ioc;
    GSource *slave_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};
struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};
static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        int saved_errno = errno;
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return r < 0 ? -saved_errno : -EIO;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -EPROTO;
    }

    trace_vhost_user_read(msg->hdr.request, msg->hdr.flags);

    return 0;
}
struct vhost_user_read_cb_data {
    struct vhost_dev *dev;
    VhostUserMsg *msg;
    GMainLoop *loop;
    int ret;
};
static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
                                   gpointer opaque)
{
    struct vhost_user_read_cb_data *data = opaque;
    struct vhost_dev *dev = data->dev;
    VhostUserMsg *msg = data->msg;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    r = vhost_user_read_header(dev, msg);
    if (r < 0) {
        data->ret = r;
        goto end;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        data->ret = -EPROTO;
        goto end;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            int saved_errno = errno;
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            data->ret = r < 0 ? -saved_errno : -EIO;
            goto end;
        }
    }

end:
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque);
/*
 * This updates the read handler to use a new event loop context.
 * Event sources are removed from the previous context: this ensures
 * that events detected in the previous context are purged. They will
 * be re-detected and processed in the new context.
 */
static void slave_update_read_handler(struct vhost_dev *dev,
                                      GMainContext *ctxt)
{
    struct vhost_user *u = dev->opaque;

    if (!u->slave_ioc) {
        return;
    }

    if (u->slave_src) {
        g_source_destroy(u->slave_src);
        g_source_unref(u->slave_src);
    }

    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
                                                G_IO_IN | G_IO_HUP,
                                                slave_read, dev, NULL,
                                                ctxt);
}
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    GMainContext *prev_ctxt = chr->chr->gcontext;
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    struct vhost_user_read_cb_data data = {
        .dev = dev,
        .loop = loop,
        .msg = msg,
        .ret = 0
    };

    /*
     * We want to be able to monitor the slave channel fd while waiting
     * for chr I/O. This requires an event loop, but we can't nest the
     * one to which chr is currently attached: its fd handlers might not
     * be prepared for re-entrancy. So we create a new one and switch chr
     * to use it.
     */
    slave_update_read_handler(dev, ctxt);
    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);

    g_main_loop_run(loop);

    /*
     * Restore the previous event loop context. This also destroys/recreates
     * event sources: this guarantees that all pending events in the original
     * context that have been processed by the nested loop are purged.
     */
    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
    slave_update_read_handler(dev, NULL);

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);

    return data.ret;
}
static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    int ret;
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    ret = vhost_user_read(dev, &msg_reply);
    if (ret < 0) {
        return ret;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -EPROTO;
    }

    return msg_reply.payload.u64 ? -EIO : 0;
}
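/*
 * Requests below apply to the whole device, not an individual vring. With
 * multiqueue, several vhost_dev instances share one backend connection, so
 * these are only sent by the vhost_dev owning queue index 0 (enforced in
 * vhost_user_write below).
 */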
static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}
/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once, the first time. Later such
     * requests are simply ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -EINVAL;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        int saved_errno = errno;
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return ret < 0 ? -saved_errno : -EIO;
    }

    trace_vhost_user_write(msg->hdr.request, msg->hdr.flags);

    return 0;
}
int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    ret = vhost_user_write(dev, &msg, fds, fd_num);
    if (ret < 0) {
        return ret;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        ret = vhost_user_read(dev, &msg);
        if (ret < 0) {
            return ret;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -EPROTO;
        }
    }

    return 0;
}
static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);

    return mr;
}
static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}
static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -ENOBUFS;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -EINVAL;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}
static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}
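/*
 * Diff the shadow table (the last memory layout acked by the backend)
 * against the device's current memory state. Regions that disappeared are
 * collected in rem_reg, new ones in add_reg, so that only the delta needs
 * to be sent as VHOST_USER_REM_MEM_REG / VHOST_USER_ADD_MEM_REG messages.
 */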
static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;
}
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, NULL, 0);
            if (ret < 0) {
                return ret;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}
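/*
 * Send one VHOST_USER_ADD_MEM_REG message per new region, passing its file
 * descriptor as ancillary data. In the postcopy case (track_ramblocks) the
 * backend additionally acks each message with its client-side mapping
 * address, which is recorded in shadow_pcb for the fault handler.
 */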
static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, &fd, 1);
            if (ret < 0) {
                return ret;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                ret = vhost_user_read(dev, &msg_reply);
                if (ret < 0) {
                    return ret;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type."
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -EPROTO;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -EPROTO;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -EPROTO;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}
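/*
 * Scrub the shadow table against the device state, then stream the removals
 * and additions to the backend. This is the incremental path used when the
 * backend advertises VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, instead of
 * resending the full memory table on every layout change.
 */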
static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;
    int ret;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg) {
        ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                                  reply_supported);
        if (ret < 0) {
            goto err;
        }
    }

    if (nr_add_reg) {
        ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
                               reply_supported, track_ramblocks);
        if (ret < 0) {
            goto err;
        }
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        ret = vhost_user_write(dev, msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return ret;
}
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;
    int ret;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                true);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_read(dev, &msg_reply);
        if (ret < 0) {
            return ret;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type."
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -EPROTO;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -EPROTO;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fd's.
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -EIO;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        ret = vhost_user_write(dev, &msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
    int ret;

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * function.
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                false);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}
static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
    assert(n && n->unmap_addr);
    munmap(n->unmap_addr, qemu_real_host_page_size());
    n->unmap_addr = NULL;
}

/*
 * clean-up function for notifier, will finally free the structure
 * under rcu.
 */
static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
                                            VirtIODevice *vdev)
{
    if (n->addr) {
        if (vdev) {
            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
        }
        assert(!n->unmap_addr);
        n->unmap_addr = n->addr;
        n->addr = NULL;
        call_rcu(n, vhost_user_host_notifier_free, rcu);
    }
}
static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}
static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -EINVAL;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        int ret;
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num   = enable,
        };

        ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
        if (ret < 0) {
            /*
             * Restoring the previous state is likely infeasible, as well as
             * proceeding regardless the error, so just bail out and hope for
             * the device-level recovery.
             */
            return ret;
        }
    }

    return 0;
}
static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
                                             int idx)
{
    if (idx >= u->notifiers->len) {
        return NULL;
    }
    return g_ptr_array_index(u->notifiers, idx);
}
static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };
    struct vhost_user *u = dev->opaque;

    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
    if (n) {
        vhost_user_host_notifier_remove(n, dev->vdev);
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *ring = msg.payload.state;

    return 0;
}
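/*
 * Pass an eventfd for a vring (kick/call/err). When ioeventfd is unusable
 * or no fd is available, VHOST_USER_VRING_NOFD_MASK is set in the payload
 * instead, telling the backend that no descriptor accompanies the request.
 */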
static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    return vhost_user_write(dev, &msg, fds, fd_num);
}
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_vring_err(struct vhost_dev *dev,
                                    struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
}
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *u64 = msg.payload.u64;

    return 0;
}
static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
        return -EPROTO;
    }

    return 0;
}
static int enforce_reply(struct vhost_dev *dev,
                         const VhostUserMsg *msg)
{
    uint64_t dummy;

    if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        return process_message_reply(dev, msg);
    }

    /*
     * We need to wait for a reply but the backend does not
     * support replies for the command we just sent.
     * Send VHOST_USER_GET_FEATURES which makes all backends
     * send a reply.
     */
    return vhost_user_get_features(dev, &dummy);
}
static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);

    if (reply_supported && wait_for_reply) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}
static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
                              bool wait_for_reply)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };
    int ret;

    if (wait_for_reply) {
        bool reply_supported = virtio_has_feature(dev->protocol_features,
                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
        if (reply_supported) {
            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
        }
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}
static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false);
}

static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
{
    uint64_t value;
    int ret;

    ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value);
    if (ret < 0) {
        return ret;
    }
    *status = value;

    return 0;
}

static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    ret = vhost_user_get_status(dev, &s);
    if (ret < 0) {
        return ret;
    }

    if ((s & status) == status) {
        return 0;
    }
    s |= status;

    return vhost_user_set_status(dev, s);
}
static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
    int ret;

    /*
     * We need to include any extra backend only feature bits that
     * might be needed by our device. Currently this includes the
     * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
     * features.
     */
    ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
                             features | dev->backend_features,
                             log_enabled);
    if (ret < 0) {
        return ret;
    }

    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_STATUS)) {
        return vhost_user_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    }

    return 0;
}
static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
                              false);
}
static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}
static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
    if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
        return -ENOSYS;
    }

    return dev->config_ops->vhost_dev_config_notifier(dev);
}
/*
 * Fetch or create the notifier for a given idx. Newly created
 * notifiers are added to the pointer array that tracks them.
 */
static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
                                                       int idx)
{
    VhostUserHostNotifier *n = NULL;
    if (idx >= u->notifiers->len) {
        g_ptr_array_set_size(u->notifiers, idx + 1);
    }

    n = g_ptr_array_index(u->notifiers, idx);
    if (!n) {
        /*
         * In case notification arrive out-of-order,
         * make room for current index.
         */
        g_ptr_array_remove_index(u->notifiers, idx);
        n = g_new0(VhostUserHostNotifier, 1);
        n->idx = idx;
        g_ptr_array_insert(u->notifiers, idx, n);
        trace_vhost_user_create_notifier(idx, n);
    }

    return n;
}
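/*
 * Map the page the backend passed as a host notifier for a vring into a
 * RAM-device MemoryRegion, so that guest notify accesses for that queue can
 * be routed into the backend's shared mapping rather than trapping into
 * QEMU's MMIO path.
 */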
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size();
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -EINVAL;
    }

    /*
     * Fetch notifier and invalidate any old data before setting up
     * new mapped address.
     */
    n = fetch_or_create_notifier(user, queue_idx);
    vhost_user_host_notifier_remove(n, vdev);

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -EINVAL;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -EFAULT;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    if (!n->mr.ram) { /* Don't init again after suspend. */
        memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                          page_size, addr);
    } else {
        n->mr.ram_block->host = addr;
    }
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -ENXIO;
    }

    n->addr = addr;

    return 0;
}
static void close_slave_channel(struct vhost_user *u)
{
    g_source_destroy(u->slave_src);
    g_source_unref(u->slave_src);
    u->slave_src = NULL;
    object_unref(OBJECT(u->slave_ioc));
    u->slave_ioc = NULL;
}
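/*
 * Service requests initiated by the backend on the slave channel (IOTLB
 * misses, config-change notifications, host notifier setup). If a request
 * carried VHOST_USER_NEED_REPLY_MASK, a reply conveying the handler's
 * status is written back on the same channel.
 */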
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    Error *local_err = NULL;
    gboolean rc = G_SOURCE_CONTINUE;
    int ret = 0;
    struct iovec iov;
    g_autofree int *fd = NULL;
    size_t fdsize = 0;
    int i;

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd ? fd[0] : -1);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];

        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
            error_report_err(local_err);
            goto err;
        }
    }

    goto fdcleanup;

err:
    close_slave_channel(u);
    rc = G_SOURCE_REMOVE;

fdcleanup:
    if (fd) {
        for (i = 0; i < fdsize; i++) {
            close(fd[i]);
        }
    }
    return rc;
}
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    Error *local_err = NULL;
    QIOChannel *ioc;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        int saved_errno = errno;
        error_report("socketpair() failed");
        return -saved_errno;
    }

    ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
    if (!ioc) {
        error_report_err(local_err);
        return -ECONNREFUSED;
    }
    u->slave_ioc = ioc;
    slave_update_read_handler(dev, NULL);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        close_slave_channel(u);
    }

    return ret;
}
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}
static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                     uint64_t offset)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    int i;

    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);

    if (!u) {
        return 0;
    }
    /* Translate the offset into an address in the client's address space */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        if (u->region_rb[i] == rb &&
            offset >= u->region_rb_offset[i] &&
            offset < (u->region_rb_offset[i] +
                      dev->mem->regions[i].memory_size)) {
            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
                                   u->postcopy_client_bases[i];
            trace_vhost_user_postcopy_waker_found(client_addr);
            return postcopy_wake_shared(pcfd, client_addr, rb);
        }
    }

    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
    return 0;
}
/*
 * Called at the start of an inbound postcopy on reception of the
 * 'advise' command.
 */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                   VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -EPROTO;
    }
    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -EIO;
    }
    qemu_socket_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -ENOSYS;
#endif
}
/*
 * Called at the switch to postcopy on reception of the 'listen' command.
 */
static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
{
    struct vhost_user *u = dev->opaque;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    u->postcopy_listen = true;

    trace_vhost_user_postcopy_listen();

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to send postcopy_listen to vhost");
        return ret;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_listen");
        return ret;
    }

    return 0;
}
/*
 * Called at the end of postcopy
 */
static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_END,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    int ret;
    struct vhost_user *u = dev->opaque;

    trace_vhost_user_postcopy_end_entry();

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to send postcopy_end to vhost");
        return ret;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_end");
        return ret;
    }
    postcopy_unregister_shared_ufd(&u->postcopy_fd);
    close(u->postcopy_fd.fd);
    u->postcopy_fd.handler = NULL;

    trace_vhost_user_postcopy_end_exit();

    return 0;
}
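/*
 * Dispatch the inbound-postcopy phases (probe, advise, listen, end) to the
 * vhost-user handlers above. The probe phase fails early when the backend
 * never negotiated VHOST_USER_PROTOCOL_F_PAGEFAULT, since such a backend
 * cannot service userfault requests for shared memory.
 */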
static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
                                        void *opaque)
{
    struct PostcopyNotifyData *pnd = opaque;
    struct vhost_user *u = container_of(notifier, struct vhost_user,
                                        postcopy_notifier);
    struct vhost_dev *dev = u->dev;

    switch (pnd->reason) {
    case POSTCOPY_NOTIFY_PROBE:
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
            /* TODO: Get the device name into this error somehow */
            error_setg(pnd->errp,
                       "vhost-user backend not capable of postcopy");
            return -ENOENT;
        }
        break;

    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
        return vhost_user_postcopy_advise(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
        return vhost_user_postcopy_listen(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_END:
        return vhost_user_postcopy_end(dev, pnd->errp);

    default:
        /* We ignore notifications we don't know */
        break;
    }

    return 0;
}
static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
                                   Error **errp)
{
    uint64_t features, ram_slots;
    struct vhost_user *u;
    VhostUserState *vus = (VhostUserState *) opaque;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = vus;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        error_setg_errno(errp, -err, "vhost_backend_init failed");
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        bool supports_f_config = vus->supports_config ||
            (dev->config_ops && dev->config_ops->vhost_dev_config_notifier);
        uint64_t protocol_features;

        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
            return -EPROTO;
        }

        /*
         * We will use all the protocol features we support - although
         * we suppress F_CONFIG if we know QEMU's internal code can not support
         * it.
         */
        protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (supports_f_config) {
            if (!virtio_has_feature(protocol_features,
                                    VHOST_USER_PROTOCOL_F_CONFIG)) {
                error_setg(errp, "vhost-user device expecting "
                           "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does "
                           "not support it.");
                return -EPROTO;
            }
        } else {
            if (virtio_has_feature(protocol_features,
                                   VHOST_USER_PROTOCOL_F_CONFIG)) {
                warn_reportf_err(*errp, "vhost-user backend supports "
                                 "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
            }
        }

        /* final set of protocol features */
        dev->protocol_features = protocol_features;
        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
            return -EPROTO;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
                return -EPROTO;
            }
        } else {
            dev->max_queues = 1;
        }

        if (dev->num_queues && dev->max_queues < dev->num_queues) {
            error_setg(errp, "The maximum number of queues supported by the "
                       "backend is %" PRIu64, dev->max_queues);
            return -EINVAL;
        }

        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                 virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_setg(errp, "IOMMU support requires reply-ack and "
                       "slave-req protocol features.");
            return -EINVAL;
        }

        /* get max memory regions if backend supports configurable RAM slots */
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
            u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
        } else {
            err = vhost_user_get_max_memslots(dev, &ram_slots);
            if (err < 0) {
                error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
                return -EPROTO;
            }

            if (ram_slots < u->user->memory_slots) {
                error_setg(errp, "The backend specified a max ram slots limit "
                           "of %" PRIu64 ", when the prior validated limit was "
                           "%d. This limit should never decrease.", ram_slots,
                           u->user->memory_slots);
                return -EINVAL;
            }

            u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    if (dev->vq_index == 0) {
        err = vhost_setup_slave_channel(dev);
        if (err < 0) {
            error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
            return -EPROTO;
        }
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}
static int vhost_user_backend_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->postcopy_notifier.notify) {
        postcopy_remove_notifier(&u->postcopy_notifier);
        u->postcopy_notifier.notify = NULL;
    }
    u->postcopy_listen = false;
    if (u->postcopy_fd.handler) {
        postcopy_unregister_shared_ufd(&u->postcopy_fd);
        close(u->postcopy_fd.fd);
        u->postcopy_fd.handler = NULL;
    }
    if (u->slave_ioc) {
        close_slave_channel(u);
    }
    g_free(u->region_rb);
    u->region_rb = NULL;
    g_free(u->region_rb_offset);
    u->region_rb_offset = NULL;
    u->region_rb_len = 0;
    g_free(u);
    dev->opaque = 0;

    return 0;
}
static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}
static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    struct vhost_user *u = dev->opaque;

    return u->user->memory_slots;
}
static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}
static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
    VhostUserMsg msg = { };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.hdr.request = VHOST_USER_SEND_RARP;
        msg.hdr.flags = VHOST_USER_VERSION;
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.hdr.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -ENOTSUP;
}
static bool vhost_user_can_merge(struct vhost_dev *dev,
                                 uint64_t start1, uint64_t size1,
                                 uint64_t start2, uint64_t size2)
{
    ram_addr_t offset;
    int mfd, rfd;

    (void)vhost_user_get_mr_data(start1, &offset, &mfd);
    (void)vhost_user_get_mr_data(start2, &offset, &rfd);

    return mfd == rfd;
}
static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
    VhostUserMsg msg;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    int ret;

    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
        return -EPROTO;
    }

    msg.hdr.request = VHOST_USER_NET_SET_MTU;
    msg.payload.u64 = mtu;
    msg.hdr.size = sizeof(msg.payload.u64);
    msg.hdr.flags = VHOST_USER_VERSION;
    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    /* If reply_ack supported, slave has to ack specified MTU is valid */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}
static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
                                            struct vhost_iotlb_msg *imsg)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_IOTLB_MSG,
        .hdr.size = sizeof(msg.payload.iotlb),
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .payload.iotlb = *imsg,
    };

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    return process_message_reply(dev, &msg);
}
static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
        return -EINVAL;
    }

    assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost_get_config failed");
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost_get_config failed");
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_setg(errp,
                   "Received unexpected msg type. Expected %d received %d",
                   VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_setg(errp, "Received bad msg size.");
        return -EPROTO;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}

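/*
 * Mirror of vhost_user_get_config() for guest writes: the payload
 * carries offset/size/flags plus up to VHOST_USER_MAX_CONFIG_SIZE
 * bytes of config data for the backend to apply.
 */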
static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size, uint32_t flags)
{
    int ret;
    uint8_t *p;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -ENOTSUP;
    }

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
        return -EINVAL;
    }

    msg.payload.config.offset = offset;
    msg.payload.config.size = size;
    msg.payload.config.flags = flags;
    p = msg.payload.config.region;
    memcpy(p, data, size);

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

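/*
 * vhost-user-crypto session setup. The symmetric session parameters
 * (and any cipher/auth keys) are copied into the message payload; the
 * backend replies with the same message type carrying the session id
 * it allocated, or a negative id on failure.
 */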
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    int ret;
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    memcpy(&msg.payload.session.session_setup_data, sess_info,
           sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_report("vhost_user_write() return %d, create session failed",
                     ret);
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_report("vhost_user_read() return %d, create session failed",
                     ret);
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                     msg.payload.session.session_id);
        return -EINVAL;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}

static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
    int ret;
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.u64),
    };
    msg.payload.u64 = session_id;

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_report("vhost_user_write() return %d, close session failed",
                     ret);
        return ret;
    }

    return 0;
}

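/*
 * Only fd-backed RAM (e.g. memory-backend-file or memory-backend-memfd
 * with share=on) can be mapped by the backend process, so sections
 * without an associated fd are filtered out of the memory table.
 * A typical guest setup that satisfies this is, for example:
 *   -object memory-backend-memfd,id=mem,size=4G,share=on \
 *   -numa node,memdev=mem
 */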
static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
                                          MemoryRegionSection *section)
{
    bool result;

    result = memory_region_get_fd(section->mr) >= 0;

    return result;
}

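/*
 * Ask the backend for a shared "inflight" buffer in which it records
 * descriptors that are currently being processed. After a backend
 * crash or reconnect, handing the same buffer back lets it resubmit
 * pending requests instead of losing them.
 */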
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    int ret;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -EIO;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -EFAULT;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}

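/*
 * Hand a previously obtained inflight buffer back to the backend,
 * passing its fd as ancillary data alongside the message.
 */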
static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
                                      struct vhost_inflight *inflight)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.mmap_size = inflight->size,
        .payload.inflight.mmap_offset = inflight->offset,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = inflight->queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    return vhost_user_write(dev, &msg, &inflight->fd, 1);
}

static void vhost_user_state_destroy(gpointer data)
{
    VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
    if (n) {
        vhost_user_host_notifier_remove(n, NULL);
        object_unparent(OBJECT(&n->mr));
        /*
         * We can't free until vhost_user_host_notifier_remove has
         * done its thing so schedule the free with RCU.
         */
        g_free_rcu(n, rcu);
    }
}

bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
{
    if (user->chr) {
        error_setg(errp, "Cannot initialize vhost-user state");
        return false;
    }
    user->chr = chr;
    user->memory_slots = 0;
    user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
                                           &vhost_user_state_destroy);
    return true;
}

void vhost_user_cleanup(VhostUserState *user)
{
    if (!user->chr) {
        return;
    }
    memory_region_transaction_begin();
    user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
    memory_region_transaction_commit();
    user->chr = NULL;
}

static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_STATUS)) {
        return 0;
    }

    /* Set device status only for last queue pair */
    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                          VIRTIO_CONFIG_S_DRIVER |
                                          VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        return vhost_user_set_status(dev, 0);
    }
}

const VhostOps user_ops = {
    .backend_type = VHOST_BACKEND_TYPE_USER,
    .vhost_backend_init = vhost_user_backend_init,
    .vhost_backend_cleanup = vhost_user_backend_cleanup,
    .vhost_backend_memslots_limit = vhost_user_memslots_limit,
    .vhost_set_log_base = vhost_user_set_log_base,
    .vhost_set_mem_table = vhost_user_set_mem_table,
    .vhost_set_vring_addr = vhost_user_set_vring_addr,
    .vhost_set_vring_endian = vhost_user_set_vring_endian,
    .vhost_set_vring_num = vhost_user_set_vring_num,
    .vhost_set_vring_base = vhost_user_set_vring_base,
    .vhost_get_vring_base = vhost_user_get_vring_base,
    .vhost_set_vring_kick = vhost_user_set_vring_kick,
    .vhost_set_vring_call = vhost_user_set_vring_call,
    .vhost_set_vring_err = vhost_user_set_vring_err,
    .vhost_set_features = vhost_user_set_features,
    .vhost_get_features = vhost_user_get_features,
    .vhost_set_owner = vhost_user_set_owner,
    .vhost_reset_device = vhost_user_reset_device,
    .vhost_get_vq_index = vhost_user_get_vq_index,
    .vhost_set_vring_enable = vhost_user_set_vring_enable,
    .vhost_requires_shm_log = vhost_user_requires_shm_log,
    .vhost_migration_done = vhost_user_migration_done,
    .vhost_backend_can_merge = vhost_user_can_merge,
    .vhost_net_set_mtu = vhost_user_net_set_mtu,
    .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
    .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
    .vhost_get_config = vhost_user_get_config,
    .vhost_set_config = vhost_user_set_config,
    .vhost_crypto_create_session = vhost_user_crypto_create_session,
    .vhost_crypto_close_session = vhost_user_crypto_close_session,
    .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
    .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
    .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
    .vhost_dev_start = vhost_user_dev_start,
};