/*
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "exec/ramblock.h"

#include <sys/ioctl.h>
#include <sys/socket.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif
#define VHOST_MEMORY_BASELINE_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS     8
/*
 * Set the maximum number of RAM slots supported to
 * the maximum number supported by the target.
 */
#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
    defined(TARGET_ARM) || defined(TARGET_ARM_64)
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS

#elif defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif
/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
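
/*
 * Illustrative sketch (annotation, not part of the original source): during
 * negotiation the mask above clips whatever the backend advertises down to
 * the feature bits this file knows about, and individual bits are then
 * tested with virtio_has_feature(), e.g.:
 *
 *     protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;
 *     if (virtio_has_feature(protocol_features,
 *                            VHOST_USER_PROTOCOL_F_REPLY_ACK)) {
 *         ... the backend can ack individual requests ...
 *     }
 */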
typedef enum VhostUserRequest {
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;
typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
} VhostUserSlaveRequest;
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;
typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;
#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64
typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;
static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))
typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;
typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;
typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;
typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserMemRegMsg mem_reg;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
} VhostUserPayload;
typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
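
/*
 * Wire-format note (annotation, not part of the original source): every
 * message is the packed 12-byte header (4 bytes request, 4 bytes flags,
 * 4 bytes size) followed by hdr.size bytes of payload, so e.g. a
 * GET_FEATURES reply occupies VHOST_USER_HDR_SIZE + sizeof(uint64_t) ==
 * 20 bytes on the socket.
 */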
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *slave_ioc;
    GSource *slave_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};
struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};
static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        int saved_errno = errno;
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return r < 0 ? -saved_errno : -EIO;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -EPROTO;
    }

    trace_vhost_user_read(msg->hdr.request, msg->hdr.flags);

    return 0;
}
struct vhost_user_read_cb_data {
    struct vhost_dev *dev;
    VhostUserMsg *msg;
    GMainLoop *loop;
    int ret;
};
static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
                                   gpointer opaque)
{
    struct vhost_user_read_cb_data *data = opaque;
    struct vhost_dev *dev = data->dev;
    VhostUserMsg *msg = data->msg;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    r = vhost_user_read_header(dev, msg);
    if (r < 0) {
        data->ret = r;
        goto end;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        data->ret = -EPROTO;
        goto end;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            int saved_errno = errno;
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            data->ret = r < 0 ? -saved_errno : -EIO;
            goto end;
        }
    }

end:
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque);
/*
 * This updates the read handler to use a new event loop context.
 * Event sources are removed from the previous context: this ensures
 * that events detected in the previous context are purged. They will
 * be re-detected and processed in the new context.
 */
static void slave_update_read_handler(struct vhost_dev *dev,
                                      GMainContext *ctxt)
{
    struct vhost_user *u = dev->opaque;

    if (!u->slave_ioc) {
        return;
    }

    if (u->slave_src) {
        g_source_destroy(u->slave_src);
        g_source_unref(u->slave_src);
    }

    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
                                                G_IO_IN | G_IO_HUP,
                                                slave_read, dev, NULL,
                                                ctxt);
}
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    GMainContext *prev_ctxt = chr->chr->gcontext;
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    struct vhost_user_read_cb_data data = {
        .dev = dev,
        .loop = loop,
        .msg = msg,
        .ret = 0
    };

    /*
     * We want to be able to monitor the slave channel fd while waiting
     * for chr I/O. This requires an event loop, but we can't nest the
     * one to which chr is currently attached: its fd handlers might not
     * be prepared for re-entrancy. So we create a new one and switch chr
     * to use it.
     */
    slave_update_read_handler(dev, ctxt);
    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);

    g_main_loop_run(loop);

    /*
     * Restore the previous event loop context. This also destroys/recreates
     * event sources: this guarantees that all pending events in the original
     * context that have been processed by the nested loop are purged.
     */
    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
    slave_update_read_handler(dev, NULL);

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);

    return data.ret;
}
static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    int ret;
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    ret = vhost_user_read(dev, &msg_reply);
    if (ret < 0) {
        return ret;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -EPROTO;
    }

    return msg_reply.payload.u64 ? -EIO : 0;
}
static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}
/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * the message only needs to be sent once. Later such requests are
     * simply ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -EINVAL;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        int saved_errno = errno;
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return ret < 0 ? -saved_errno : -EIO;
    }

    trace_vhost_user_write(msg->hdr.request, msg->hdr.flags);

    return 0;
}
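
/*
 * Usage sketch (annotation, not part of the original source): most request
 * handlers below combine the two primitives into a synchronous round trip:
 *
 *     VhostUserMsg msg = {
 *         .hdr.request = VHOST_USER_GET_FEATURES,
 *         .hdr.flags = VHOST_USER_VERSION,
 *     };
 *     if (vhost_user_write(dev, &msg, NULL, 0) < 0 ||
 *         vhost_user_read(dev, &msg) < 0) {
 *         ... handle the error ...
 *     }
 *     uint64_t features = msg.payload.u64;
 */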
int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    ret = vhost_user_write(dev, &msg, fds, fd_num);
    if (ret < 0) {
        return ret;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        ret = vhost_user_read(dev, &msg);
        if (ret < 0) {
            return ret;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -EPROTO;
        }
    }

    return 0;
}
static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);

    return mr;
}
static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}
static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -ENOBUFS;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -EINVAL;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}
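
/*
 * Worked size example (annotation, not part of the original source): with
 * two fd-backed regions the header arithmetic above yields
 * hdr.size = sizeof(nregions) + sizeof(padding)
 *            + 2 * sizeof(VhostUserMemoryRegion)
 *          = 4 + 4 + 2 * 32 = 72 bytes of payload.
 */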
static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}
static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num - 1;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;
}
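
/*
 * Worked example (annotation, not part of the original source): with a
 * shadow table {A, B} and a new device state {B, C}, the first pass marks
 * B "found" and queues A on the removed list; the second pass skips B and
 * queues C on the added list, so the caller ends up sending one
 * VHOST_USER_REM_MEM_REG for A and one VHOST_USER_ADD_MEM_REG for C.
 */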
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, NULL, 0);
            if (ret < 0) {
                return ret;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}
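
/*
 * Worked example (annotation, not part of the original source): with a
 * shadow table [A, B, C] and B removed at index 1, the memmove above copies
 * num_shadow_regions - 1 - 1 == 1 trailing element (C) over B, leaving
 * [A, C] before the count is decremented.
 */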
static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, &fd, 1);
            if (ret < 0) {
                return ret;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                ret = vhost_user_read(dev, &msg_reply);
                if (ret < 0) {
                    return ret;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type."
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -EPROTO;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -EPROTO;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -EPROTO;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}
static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;
    int ret;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg) {
        ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                                  reply_supported);
        if (ret < 0) {
            goto err;
        }
    }

    if (nr_add_reg) {
        ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
                               reply_supported, track_ramblocks);
        if (ret < 0) {
            goto err;
        }
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        ret = vhost_user_write(dev, msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return ret;
}
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;
    int ret;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                true);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_read(dev, &msg_reply);
        if (ret < 0) {
            return ret;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type."
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -EPROTO;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -EPROTO;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fd's.
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -EIO;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        ret = vhost_user_write(dev, &msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
    int ret;

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * version.
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                false);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}
*dev
,
1131 struct vhost_vring_state
*ring
)
1133 bool cross_endian
= virtio_has_feature(dev
->protocol_features
,
1134 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN
);
1135 VhostUserMsg msg
= {
1136 .hdr
.request
= VHOST_USER_SET_VRING_ENDIAN
,
1137 .hdr
.flags
= VHOST_USER_VERSION
,
1138 .payload
.state
= *ring
,
1139 .hdr
.size
= sizeof(msg
.payload
.state
),
1142 if (!cross_endian
) {
1143 error_report("vhost-user trying to send unhandled ioctl");
1147 return vhost_user_write(dev
, &msg
, NULL
, 0);
static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
    assert(n && n->unmap_addr);
    munmap(n->unmap_addr, qemu_real_host_page_size());
    n->unmap_addr = NULL;
}
/*
 * clean-up function for notifier, will finally free the structure
 * under rcu.
 */
static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
                                            VirtIODevice *vdev)
{
    if (n->addr) {
        if (vdev) {
            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
        }
        assert(!n->unmap_addr);
        n->unmap_addr = n->addr;
        n->addr = NULL;
        call_rcu(n, vhost_user_host_notifier_free, rcu);
    }
}
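
/*
 * Annotation (not part of the original source): the notifier page is not
 * munmap()ed synchronously because a vCPU may still be dereferencing it
 * through the ram-device MemoryRegion. Stashing the pointer in unmap_addr
 * and deferring the munmap to vhost_user_host_notifier_free() via
 * call_rcu() ensures the unmap only runs after all readers have left their
 * RCU read-side critical sections.
 */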
static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}
static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -EINVAL;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        int ret;
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num   = enable,
        };

        ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
        if (ret < 0) {
            /*
             * Restoring the previous state is likely infeasible, as is
             * proceeding regardless of the error, so just bail out and
             * hope for device-level recovery.
             */
            return ret;
        }
    }

    return 0;
}
static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
                                             int idx)
{
    if (idx >= u->notifiers->len) {
        return NULL;
    }
    return g_ptr_array_index(u->notifiers, idx);
}
static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };
    struct vhost_user *u = dev->opaque;

    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
    if (n) {
        vhost_user_host_notifier_remove(n, dev->vdev);
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *ring = msg.payload.state;

    return 0;
}
static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    return vhost_user_write(dev, &msg, fds, fd_num);
}
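
/*
 * Encoding note (annotation, not part of the original source): the u64
 * payload used by these requests packs the ring index into bits 0-7 and
 * "no fd attached" into bit 8, so disabling the kick eventfd for ring 2
 * sends 2 | VHOST_USER_VRING_NOFD_MASK == 0x102.
 */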
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}
static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}
static int vhost_user_set_vring_err(struct vhost_dev *dev,
                                    struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
}
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *u64 = msg.payload.u64;

    return 0;
}
static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
        return -EPROTO;
    }

    return 0;
}
static int enforce_reply(struct vhost_dev *dev,
                         const VhostUserMsg *msg)
{
    uint64_t dummy;

    if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        return process_message_reply(dev, msg);
    }

    /*
     * We need to wait for a reply but the backend does not
     * support replies for the command we just sent.
     * Send VHOST_USER_GET_FEATURES which makes all backends
     * send a reply.
     */
    return vhost_user_get_features(dev, &dummy);
}
static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);

    if (reply_supported && wait_for_reply) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}
*dev
, int request
, uint64_t u64
,
1424 bool wait_for_reply
)
1426 VhostUserMsg msg
= {
1427 .hdr
.request
= request
,
1428 .hdr
.flags
= VHOST_USER_VERSION
,
1430 .hdr
.size
= sizeof(msg
.payload
.u64
),
1434 if (wait_for_reply
) {
1435 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
1436 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
1437 if (reply_supported
) {
1438 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
1442 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1447 if (wait_for_reply
) {
1448 return enforce_reply(dev
, &msg
);
1454 static int vhost_user_set_features(struct vhost_dev
*dev
,
1458 * wait for a reply if logging is enabled to make sure
1459 * backend is actually logging changes
1461 bool log_enabled
= features
& (0x1ULL
<< VHOST_F_LOG_ALL
);
1463 return vhost_user_set_u64(dev
, VHOST_USER_SET_FEATURES
, features
,
static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
                              false);
}
static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}
static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
    if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
        return -ENOSYS;
    }

    return dev->config_ops->vhost_dev_config_notifier(dev);
}
/*
 * Fetch or create the notifier for a given idx. Newly created
 * notifiers are added to the pointer array that tracks them.
 */
static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
                                                       int idx)
{
    VhostUserHostNotifier *n = NULL;
    if (idx >= u->notifiers->len) {
        g_ptr_array_set_size(u->notifiers, idx + 1);
    }

    n = g_ptr_array_index(u->notifiers, idx);
    if (!n) {
        n = g_new0(VhostUserHostNotifier, 1);
        n->idx = idx;
        g_ptr_array_insert(u->notifiers, idx, n);
        trace_vhost_user_create_notifier(idx, n);
    }

    return n;
}
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size();
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -EINVAL;
    }

    /*
     * Fetch notifier and invalidate any old data before setting up
     * new mapped address.
     */
    n = fetch_or_create_notifier(user, queue_idx);
    vhost_user_host_notifier_remove(n, vdev);

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -EINVAL;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -EFAULT;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    if (!n->mr.ram) { /* Don't init again after suspend. */
        memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                          page_size, addr);
    } else {
        n->mr.ram_block->host = addr;
    }
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -ENXIO;
    }

    n->addr = addr;

    return 0;
}
static void close_slave_channel(struct vhost_user *u)
{
    g_source_destroy(u->slave_src);
    g_source_unref(u->slave_src);
    u->slave_src = NULL;
    object_unref(OBJECT(u->slave_ioc));
    u->slave_ioc = NULL;
}
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    Error *local_err = NULL;
    gboolean rc = G_SOURCE_CONTINUE;
    int ret = 0;
    struct iovec iov;
    g_autofree int *fd = NULL;
    size_t fdsize = 0;
    int i;

    /* Read the header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read the payload */
    if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd ? fd[0] : -1);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];

        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
            error_report_err(local_err);
            goto err;
        }
    }

    goto fdcleanup;

err:
    close_slave_channel(u);
    rc = G_SOURCE_REMOVE;

fdcleanup:
    if (fd) {
        for (i = 0; i < fdsize; i++) {
            close(fd[i]);
        }
    }
    return rc;
}
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    Error *local_err = NULL;
    QIOChannel *ioc;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        int saved_errno = errno;
        error_report("socketpair() failed");
        return -saved_errno;
    }

    ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
    if (!ioc) {
        error_report_err(local_err);
        return -ECONNREFUSED;
    }
    u->slave_ioc = ioc;
    slave_update_read_handler(dev, NULL);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        close_slave_channel(u);
    }

    return ret;
}
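
/*
 * Protocol sketch (annotation, not part of the original source): QEMU keeps
 * sv[0] and hands sv[1] to the backend in VHOST_USER_SET_SLAVE_REQ_FD. The
 * backend can then initiate its own requests on that socket, e.g. a
 * VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG carrying a VhostUserVringArea
 * plus an fd, which slave_read() above dispatches and, when the sender set
 * VHOST_USER_NEED_REPLY_MASK, acks with a u64 status payload.
 */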
#ifdef CONFIG_LINUX
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 *
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}
static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                     uint64_t offset)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    int i;

    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);

    if (!u) {
        return 0;
    }

    /* Translate the offset into an address in the client's address space */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        if (u->region_rb[i] == rb &&
            offset >= u->region_rb_offset[i] &&
            offset < (u->region_rb_offset[i] +
                      dev->mem->regions[i].memory_size)) {
            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
                                   u->postcopy_client_bases[i];
            trace_vhost_user_postcopy_waker_found(client_addr);
            return postcopy_wake_shared(pcfd, client_addr, rb);
        }
    }

    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
    return 0;
}
#endif
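
/*
 * Worked translation example (annotation, not part of the original source):
 * if region i starts at RAMBlock offset region_rb_offset[i] = 0x200000 and
 * the backend mapped it at postcopy_client_bases[i] = 0x7f0000000000, a
 * wake for RAMBlock offset 0x203000 resolves to client address
 * (0x203000 - 0x200000) + 0x7f0000000000 = 0x7f0000003000.
 */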
/*
 * Called at the start of an inbound postcopy on reception of the
 * 'advise' command.
 */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                   VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -EPROTO;
    }
    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -EIO;
    }
    qemu_socket_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -ENOSYS;
#endif
}
/*
 * Called at the switch to postcopy on reception of the 'listen' command.
 */
static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
{
    struct vhost_user *u = dev->opaque;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    u->postcopy_listen = true;

    trace_vhost_user_postcopy_listen();

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to send postcopy_listen to vhost");
        return ret;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_listen");
        return ret;
    }

    return 0;
}
/*
 * Called at the end of postcopy
 */
static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_END,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    int ret;
    struct vhost_user *u = dev->opaque;

    trace_vhost_user_postcopy_end_entry();

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to send postcopy_end to vhost");
        return ret;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_end");
        return ret;
    }
    postcopy_unregister_shared_ufd(&u->postcopy_fd);
    close(u->postcopy_fd.fd);
    u->postcopy_fd.handler = NULL;

    trace_vhost_user_postcopy_end_exit();

    return 0;
}
static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
                                        void *opaque)
{
    struct PostcopyNotifyData *pnd = opaque;
    struct vhost_user *u = container_of(notifier, struct vhost_user,
                                        postcopy_notifier);
    struct vhost_dev *dev = u->dev;

    switch (pnd->reason) {
    case POSTCOPY_NOTIFY_PROBE:
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
            /* TODO: Get the device name into this error somehow */
            error_setg(pnd->errp,
                       "vhost-user backend not capable of postcopy");
            return -ENOENT;
        }
        break;

    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
        return vhost_user_postcopy_advise(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
        return vhost_user_postcopy_listen(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_END:
        return vhost_user_postcopy_end(dev, pnd->errp);

    default:
        /* We ignore notifications we don't know */
        break;
    }

    return 0;
}
static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
                                   Error **errp)
{
    uint64_t features, ram_slots;
    struct vhost_user *u;
    VhostUserState *vus = (VhostUserState *) opaque;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = vus;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        error_setg_errno(errp, -err, "vhost_backend_init failed");
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        bool supports_f_config = vus->supports_config ||
            (dev->config_ops && dev->config_ops->vhost_dev_config_notifier);
        uint64_t protocol_features;

        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
            return -EPROTO;
        }

        /*
         * We will use all the protocol features we support - although
         * we suppress F_CONFIG if we know QEMU's internal code can not support
         * it.
         */
        protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (supports_f_config) {
            if (!virtio_has_feature(protocol_features,
                                    VHOST_USER_PROTOCOL_F_CONFIG)) {
                error_setg(errp, "vhost-user device expecting "
                           "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does "
                           "not support it.");
                return -EPROTO;
            }
        } else {
            if (virtio_has_feature(protocol_features,
                                   VHOST_USER_PROTOCOL_F_CONFIG)) {
                warn_reportf_err(*errp, "vhost-user backend supports "
                                 "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
            }
        }

        /* final set of protocol features */
        dev->protocol_features = protocol_features;
        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
            return -EPROTO;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
                return -EPROTO;
            }
        } else {
            dev->max_queues = 1;
        }

        if (dev->num_queues && dev->max_queues < dev->num_queues) {
            error_setg(errp, "The maximum number of queues supported by the "
                       "backend is %" PRIu64, dev->max_queues);
            return -EINVAL;
        }

        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                        VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                  virtio_has_feature(dev->protocol_features,
                        VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_setg(errp, "IOMMU support requires reply-ack and "
                       "slave-req protocol features.");
            return -EINVAL;
        }

        /* get max memory regions if backend supports configurable RAM slots */
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
            u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
        } else {
            err = vhost_user_get_max_memslots(dev, &ram_slots);
            if (err < 0) {
                error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
                return -EPROTO;
            }

            if (ram_slots < u->user->memory_slots) {
                error_setg(errp, "The backend specified a max ram slots limit "
                           "of %" PRIu64 ", when the prior validated limit was "
                           "%d. This limit should never decrease.", ram_slots,
                           u->user->memory_slots);
                return -EINVAL;
            }

            u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    if (dev->vq_index == 0) {
        err = vhost_setup_slave_channel(dev);
        if (err < 0) {
            error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
            return -EPROTO;
        }
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}
static int vhost_user_backend_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->postcopy_notifier.notify) {
        postcopy_remove_notifier(&u->postcopy_notifier);
        u->postcopy_notifier.notify = NULL;
    }
    u->postcopy_listen = false;
    if (u->postcopy_fd.handler) {
        postcopy_unregister_shared_ufd(&u->postcopy_fd);
        close(u->postcopy_fd.fd);
        u->postcopy_fd.handler = NULL;
    }
    if (u->slave_ioc) {
        close_slave_channel(u);
    }
    g_free(u->region_rb);
    u->region_rb = NULL;
    g_free(u->region_rb_offset);
    u->region_rb_offset = NULL;
    u->region_rb_len = 0;
    g_free(u);
    dev->opaque = 0;

    return 0;
}
static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}
static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    struct vhost_user *u = dev->opaque;

    return u->user->memory_slots;
}
static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}
static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
    VhostUserMsg msg = { };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.hdr.request = VHOST_USER_SEND_RARP;
        msg.hdr.flags = VHOST_USER_VERSION;
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.hdr.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -ENOTSUP;
}
*dev
,
2212 uint64_t start1
, uint64_t size1
,
2213 uint64_t start2
, uint64_t size2
)
2218 (void)vhost_user_get_mr_data(start1
, &offset
, &mfd
);
2219 (void)vhost_user_get_mr_data(start2
, &offset
, &rfd
);
2224 static int vhost_user_net_set_mtu(struct vhost_dev
*dev
, uint16_t mtu
)
2227 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
2228 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
2231 if (!(dev
->protocol_features
& (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU
))) {
2235 msg
.hdr
.request
= VHOST_USER_NET_SET_MTU
;
2236 msg
.payload
.u64
= mtu
;
2237 msg
.hdr
.size
= sizeof(msg
.payload
.u64
);
2238 msg
.hdr
.flags
= VHOST_USER_VERSION
;
2239 if (reply_supported
) {
2240 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
2243 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2248 /* If reply_ack supported, slave has to ack specified MTU is valid */
2249 if (reply_supported
) {
2250 return process_message_reply(dev
, &msg
);
2256 static int vhost_user_send_device_iotlb_msg(struct vhost_dev
*dev
,
2257 struct vhost_iotlb_msg
*imsg
)
2260 VhostUserMsg msg
= {
2261 .hdr
.request
= VHOST_USER_IOTLB_MSG
,
2262 .hdr
.size
= sizeof(msg
.payload
.iotlb
),
2263 .hdr
.flags
= VHOST_USER_VERSION
| VHOST_USER_NEED_REPLY_MASK
,
2264 .payload
.iotlb
= *imsg
,
2267 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2272 return process_message_reply(dev
, &msg
);
static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
        return -EINVAL;
    }

    assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost_get_config failed");
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost_get_config failed");
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_setg(errp,
                   "Received unexpected msg type. Expected %d received %d",
                   VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_setg(errp, "Received bad msg size.");
        return -EPROTO;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}
static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size, uint32_t flags)
{
    int ret;
    uint8_t *p;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -ENOTSUP;
    }

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
        return -EINVAL;
    }

    msg.payload.config.offset = offset;
    msg.payload.config.size = size;
    msg.payload.config.flags = flags;
    p = msg.payload.config.region;
    memcpy(p, data, size);

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}
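
/*
 * Ask the backend to set up a crypto session and return the
 * backend-allocated session id through @session_id.  The session
 * parameters and key material are copied into the request payload
 * and travel over the vhost-user socket.
 */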
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    int ret;
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    memcpy(&msg.payload.session.session_setup_data, sess_info,
           sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_report("vhost_user_write() returned %d, create session failed",
                     ret);
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_report("vhost_user_read() returned %d, create session failed",
                     ret);
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d, received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64,
                     msg.payload.session.session_id);
        return -EINVAL;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}
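
/*
 * Tear down a previously created crypto session.  No reply is
 * requested for the close.
 */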
static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
    int ret;
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.u64),
    };
    msg.payload.u64 = session_id;

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_report("vhost_user_write() returned %d, close session failed",
                     ret);
        return ret;
    }

    return 0;
}
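
/*
 * Only sections backed by an fd-addressable RAM block (e.g. memfd or
 * file-backed memory) can be handled by vhost-user, since memory
 * regions are passed to the backend as file descriptors.
 */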
static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
                                          MemoryRegionSection *section)
{
    bool result;

    result = memory_region_get_fd(section->mr) >= 0;

    return result;
}
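
/*
 * Fetch the shared "inflight" buffer used to track in-flight I/O
 * requests across backend restarts.  The backend returns an fd plus
 * mmap size/offset in the reply; the buffer is mapped and recorded
 * in @inflight.
 */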
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    int ret;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d, received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -EIO;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -EFAULT;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}
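
/*
 * Hand an existing inflight buffer back to the (possibly restarted)
 * backend so it can resume processing of requests that were still in
 * flight when it went away.  The fd is passed as ancillary data.
 */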
static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
                                      struct vhost_inflight *inflight)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.mmap_size = inflight->size,
        .payload.inflight.mmap_offset = inflight->offset,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = inflight->queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    return vhost_user_write(dev, &msg, &inflight->fd, 1);
}
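
/* GDestroyNotify for the entries of the notifiers array allocated below. */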
static void vhost_user_state_destroy(gpointer data)
{
    VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
    if (n) {
        vhost_user_host_notifier_remove(n, NULL);
        object_unparent(OBJECT(&n->mr));
        /*
         * We can't free until vhost_user_host_notifier_remove has
         * done its thing, so schedule the free with RCU.
         */
        g_free_rcu(n, rcu);
    }
}
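
/*
 * Bind @user to its chardev and allocate the host-notifier array.
 * Fails if the state is already associated with a chardev.
 */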
bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
{
    if (user->chr) {
        error_setg(errp, "Cannot initialize vhost-user state");
        return false;
    }
    user->chr = chr;
    user->memory_slots = 0;
    user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
                                           &vhost_user_state_destroy);
    return true;
}
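
/*
 * Undo vhost_user_init().  Freeing the notifier array runs
 * vhost_user_state_destroy() on each entry; wrapping it in a memory
 * region transaction batches the resulting memory-map updates.
 */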
void vhost_user_cleanup(VhostUserState *user)
{
    if (!user->chr) {
        return;
    }
    memory_region_transaction_begin();
    user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
    memory_region_transaction_commit();
    user->chr = NULL;
}
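
/* Dispatch table plugging the vhost-user implementations into the
 * generic vhost core. */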
const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_vring_err = vhost_user_set_vring_err,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};