hw/virtio/vhost-user.c [qemu/kevin.git]
/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <linux/vhost.h>

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,

    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
        struct vhost_iotlb_msg iotlb;
    } payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

struct vhost_user {
    CharBackend *chr;
    int slave_fd;
};

static bool ioeventfd_enabled(void)
{
    return kvm_enabled() && kvm_eventfds_enabled();
}

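/*
 * Read one reply from the backend over the char device: the fixed-size
 * header first, then (after validating the flags and the advertised
 * payload size) the payload itself.
 */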
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->request);
        goto fail;
    }

    /* validate received flags */
    if (msg->flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        goto fail;
    }

    /* validate message size is sane */
    if (msg->size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto fail;
    }

    if (msg->size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->size);
            goto fail;
        }
    }

    return 0;

fail:
    return -1;
}

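/*
 * If the request was sent with VHOST_USER_NEED_REPLY_MASK set, wait for
 * the backend's ack and treat a non-zero u64 payload as failure.
 */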
static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    VhostUserMsg msg_reply;

    if ((msg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    if (vhost_user_read(dev, &msg_reply) < 0) {
        return -1;
    }

    if (msg_reply.request != msg->request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->request, msg_reply.request);
        return -1;
    }

    return msg_reply.payload.u64 ? -1 : 0;
}

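/*
 * Requests that apply to the whole device rather than to a single
 * virtqueue pair, and therefore only need to be sent once.
 */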
static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once, for the first queue pair
     * (vq_index 0); repeats from the other queue pairs are ignored.
     */
    if (vhost_user_one_time_request(msg->request) && dev->vq_index != 0) {
        msg->flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -1;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return -1;
    }

    return 0;
}

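/*
 * Point the backend at the dirty-page log.  When the LOG_SHMFD protocol
 * feature was negotiated, the log is handed over as a shared-memory fd
 * and the backend's reply is awaited before returning.
 */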
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    VhostUserMsg msg = {
        .request = VHOST_USER_SET_LOG_BASE,
        .flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (shmfd) {
        msg.size = 0;
        if (vhost_user_read(dev, &msg) < 0) {
            return -1;
        }

        if (msg.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.request);
            return -1;
        }
    }

    return 0;
}

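/*
 * Translate the vhost memory table into VhostUserMemoryRegion entries,
 * sending one mmap'able fd per region that is backed by a file
 * descriptor (e.g. memory-backend-file with share=on).
 */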
static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int i, fd;
    size_t fd_num = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .request = VHOST_USER_SET_MEM_TABLE,
        .flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;

        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                     &offset);
        fd = memory_region_get_fd(mr);
        if (fd > 0) {
            msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
            msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
            msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
            msg.payload.memory.regions[fd_num].mmap_offset = offset;
            assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
            fds[fd_num++] = fd;
        }
    }

    msg.payload.memory.nregions = fd_num;

    if (!fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -1;
    }

    msg.size = sizeof(msg.payload.memory.nregions);
    msg.size += sizeof(msg.payload.memory.padding);
    msg.size += fd_num * sizeof(VhostUserMemoryRegion);

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_SET_VRING_ADDR,
        .flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .size = sizeof(msg.payload.addr),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .request = VHOST_USER_SET_VRING_ENDIAN,
        .flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

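/* Enable or disable every ring handled by this vhost_dev. */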
static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -1;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num   = enable,
        };

        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
    }

    return 0;
}

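/*
 * Fetch the last available index from the backend; per the vhost-user
 * protocol this request also stops the ring on the backend side.
 */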
static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_GET_VRING_BASE,
        .flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.request);
        return -1;
    }

    if (msg.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}

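/*
 * Pass an eventfd (kick or call) for a ring.  If ioeventfd cannot be
 * used, the VHOST_USER_VRING_NOFD_MASK bit signals that no fd
 * accompanies the message.
 */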
static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}

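/* Send a request that expects a u64 reply and return the value read back. */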
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.request);
        return -1;
    }

    if (msg.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_SET_OWNER,
        .flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_RESET_OWNER,
        .flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

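/*
 * Handler for requests initiated by the backend on the slave channel
 * (currently only VHOST_USER_SLAVE_IOTLB_MSG); sends a REPLY_ACK when
 * the backend asked for one.
 */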
static void slave_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserMsg msg = { 0, };
    int size, ret = 0;

    /* Read header */
    size = read(u->slave_fd, &msg, VHOST_USER_HDR_SIZE);
    if (size != VHOST_USER_HDR_SIZE) {
        error_report("Failed to read from slave.");
        goto err;
    }

    if (msg.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    size = read(u->slave_fd, &msg.payload, msg.size);
    if (size != msg.size) {
        error_report("Failed to read payload from slave.");
        goto err;
    }

    switch (msg.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &msg.payload.iotlb);
        break;
    default:
        error_report("Received unexpected msg type.");
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (msg.flags & VHOST_USER_NEED_REPLY_MASK) {
        msg.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        msg.flags |= VHOST_USER_REPLY_MASK;

        msg.payload.u64 = !!ret;
        msg.size = sizeof(msg.payload.u64);

        size = write(u->slave_fd, &msg, VHOST_USER_HDR_SIZE + msg.size);
        if (size != VHOST_USER_HDR_SIZE + msg.size) {
            error_report("Failed to send msg reply to slave.");
            goto err;
        }
    }

    return;

err:
    qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
    close(u->slave_fd);
    u->slave_fd = -1;
    return;
}

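/*
 * Create the socketpair used for backend-initiated requests and hand one
 * end to the backend with VHOST_USER_SET_SLAVE_REQ_FD.  This is only done
 * when the SLAVE_REQ protocol feature was negotiated.
 */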
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_SET_SLAVE_REQ_FD,
        .flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    u->slave_fd = sv[0];
    qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev);

    if (reply_supported) {
        msg.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }

    return ret;
}

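/*
 * Backend init: negotiate features and protocol features, query the
 * maximum number of queues, add a migration blocker when dirty logging
 * over shared memory is unavailable, and set up the slave channel.
 */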
static int vhost_user_init(struct vhost_dev *dev, void *opaque)
{
    uint64_t features, protocol_features;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->chr = opaque;
    u->slave_fd = -1;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return err;
        }

        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return err;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return err;
            }
        }
    }

    if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
            !(virtio_has_feature(dev->protocol_features,
                                 VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
              virtio_has_feature(dev->protocol_features,
                                 VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        error_report("IOMMU support requires reply-ack and "
                     "slave-req protocol features.");
        return -1;
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    err = vhost_setup_slave_channel(dev);
    if (err < 0) {
        return err;
    }

    return 0;
}

static int vhost_user_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->slave_fd >= 0) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }
    g_free(u);
    dev->opaque = 0;

    return 0;
}

static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}

static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    return VHOST_MEMORY_MAX_NREGIONS;
}

static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}

static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
    VhostUserMsg msg = { 0 };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.request = VHOST_USER_SEND_RARP;
        msg.flags = VHOST_USER_VERSION;
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -1;
}

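/*
 * Two adjacent RAM blocks can only be merged into a single region if
 * they are backed by the same file descriptor.
 */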
static bool vhost_user_can_merge(struct vhost_dev *dev,
                                 uint64_t start1, uint64_t size1,
                                 uint64_t start2, uint64_t size2)
{
    ram_addr_t offset;
    int mfd, rfd;
    MemoryRegion *mr;

    mr = memory_region_from_host((void *)(uintptr_t)start1, &offset);
    mfd = memory_region_get_fd(mr);

    mr = memory_region_from_host((void *)(uintptr_t)start2, &offset);
    rfd = memory_region_get_fd(mr);

    return mfd == rfd;
}

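/*
 * Advertise the guest MTU to the backend if it negotiated
 * VHOST_USER_PROTOCOL_F_NET_MTU.
 */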
static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
    VhostUserMsg msg;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
        return 0;
    }

    msg.request = VHOST_USER_NET_SET_MTU;
    msg.payload.u64 = mtu;
    msg.size = sizeof(msg.payload.u64);
    msg.flags = VHOST_USER_VERSION;
    if (reply_supported) {
        msg.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    /* If reply_ack supported, slave has to ack specified MTU is valid */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

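/* Forward an IOTLB update/invalidate to the backend and wait for its ack. */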
static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
                                            struct vhost_iotlb_msg *imsg)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_IOTLB_MSG,
        .size = sizeof(msg.payload.iotlb),
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .payload.iotlb = *imsg,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -EFAULT;
    }

    return process_message_reply(dev, &msg);
}

static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}

const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_init,
        .vhost_backend_cleanup = vhost_user_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
};