vhost-user: add a migration blocker
[qemu/ar7.git] / hw / virtio / vhost-user.c
blob fb3aa40d59010a0dd68c9976059f00ada8bb2de7
1 /*
2 * vhost-user
4 * Copyright (c) 2013 Virtual Open Systems Sarl.
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
9 */
11 #include "hw/virtio/vhost.h"
12 #include "hw/virtio/vhost-backend.h"
13 #include "sysemu/char.h"
14 #include "sysemu/kvm.h"
15 #include "qemu/error-report.h"
16 #include "qemu/sockets.h"
17 #include "exec/ram_addr.h"
18 #include "migration/migration.h"
20 #include <fcntl.h>
21 #include <unistd.h>
22 #include <sys/ioctl.h>
23 #include <sys/socket.h>
24 #include <sys/un.h>
25 #include <linux/vhost.h>
/* Maximum number of memory regions (and thus fds) in one SET_MEM_TABLE msg */
#define VHOST_MEMORY_MAX_NREGIONS    8
/* Feature bit: the slave supports protocol-feature negotiation */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* Mask of protocol feature bits this implementation knows about (MQ, LOG_SHMFD) */
#define VHOST_USER_PROTOCOL_FEATURE_MASK 0x3ULL

#define VHOST_USER_PROTOCOL_F_MQ    0        /* multiqueue support */
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1    /* dirty log via shared mmap fd */
/*
 * Request codes of the vhost-user protocol.  The numeric values are part
 * of the wire format shared with the slave process and must never be
 * reordered or renumbered.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_DEVICE = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_MAX      /* sentinel, not a real request */
} VhostUserRequest;
/* One guest memory region as described to the slave (wire format). */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* guest physical address of the region */
    uint64_t memory_size;       /* region size in bytes */
    uint64_t userspace_addr;    /* QEMU virtual address (for translation) */
    uint64_t mmap_offset;       /* offset into the fd where mapping begins */
} VhostUserMemoryRegion;
/* Payload of VHOST_USER_SET_MEM_TABLE (wire format). */
typedef struct VhostUserMemory {
    uint32_t nregions;          /* number of valid entries in regions[] */
    uint32_t padding;           /* keeps regions[] 64-bit aligned on the wire */
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
/*
 * On-the-wire message: fixed header (request, flags, size) followed by a
 * request-specific payload.  Packed because the exact byte layout is the
 * protocol; field order must not change.
 */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)      /* low 2 bits: protocol version */
#define VHOST_USER_REPLY_MASK       (0x1<<2)   /* set by the slave on replies */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)     /* vring index in u64 payloads */
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)   /* "no fd attached" marker bit */
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
    };
} QEMU_PACKED VhostUserMsg;
/* Dummy instance used only so sizeof() can name the message fields below. */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

/* Maximum payload: whole message minus the fixed header. */
#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/* True when kick fds can be used: requires KVM with eventfd support. */
static bool ioeventfd_enabled(void)
{
    if (!kvm_enabled()) {
        return false;
    }
    return kvm_eventfds_enabled();
}
/*
 * Maps each VhostUserRequest (array index) to the equivalent kernel vhost
 * ioctl number.  Used in reverse by vhost_user_request_translate(); entry
 * order must mirror the VhostUserRequest enum.
 */
static unsigned long int ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
    -1,                     /* VHOST_USER_NONE */
    VHOST_GET_FEATURES,     /* VHOST_USER_GET_FEATURES */
    VHOST_SET_FEATURES,     /* VHOST_USER_SET_FEATURES */
    VHOST_SET_OWNER,        /* VHOST_USER_SET_OWNER */
    VHOST_RESET_DEVICE,     /* VHOST_USER_RESET_DEVICE */
    VHOST_SET_MEM_TABLE,    /* VHOST_USER_SET_MEM_TABLE */
    VHOST_SET_LOG_BASE,     /* VHOST_USER_SET_LOG_BASE */
    VHOST_SET_LOG_FD,       /* VHOST_USER_SET_LOG_FD */
    VHOST_SET_VRING_NUM,    /* VHOST_USER_SET_VRING_NUM */
    VHOST_SET_VRING_ADDR,   /* VHOST_USER_SET_VRING_ADDR */
    VHOST_SET_VRING_BASE,   /* VHOST_USER_SET_VRING_BASE */
    VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
    VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
    VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
};
120 static VhostUserRequest vhost_user_request_translate(unsigned long int request)
122 VhostUserRequest idx;
124 for (idx = 0; idx < VHOST_USER_MAX; idx++) {
125 if (ioctl_to_vhost_user_request[idx] == request) {
126 break;
130 return (idx == VHOST_USER_MAX) ? VHOST_USER_NONE : idx;
/*
 * Read one reply from the slave into *msg: first the fixed-size header,
 * then (after validating it) the payload.  Returns 0 on success, -1 on a
 * short read or a malformed header.
 */
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    /* dev->opaque is the chardev connected to the slave (set at init time) */
    CharDriverState *chr = dev->opaque;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d.", r,
                     size);
        goto fail;
    }

    /* validate received flags: must be a versioned reply from the slave */
    if (msg->flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        goto fail;
    }

    /* validate message size is sane, so the payload read cannot overrun msg */
    if (msg->size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto fail;
    }

    if (msg->size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->size);
            goto fail;
        }
    }

    return 0;

fail:
    return -1;
}
179 static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
180 int *fds, int fd_num)
182 CharDriverState *chr = dev->opaque;
183 int size = VHOST_USER_HDR_SIZE + msg->size;
185 if (fd_num) {
186 qemu_chr_fe_set_msgfds(chr, fds, fd_num);
189 return qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size) == size ?
190 0 : -1;
193 static bool vhost_user_one_time_request(VhostUserRequest request)
195 switch (request) {
196 case VHOST_USER_SET_OWNER:
197 case VHOST_USER_RESET_DEVICE:
198 case VHOST_USER_SET_MEM_TABLE:
199 case VHOST_USER_GET_QUEUE_NUM:
200 return true;
201 default:
202 return false;
206 static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
207 void *arg)
209 VhostUserMsg msg;
210 VhostUserRequest msg_request;
211 struct vhost_vring_file *file = 0;
212 int need_reply = 0;
213 int fds[VHOST_MEMORY_MAX_NREGIONS];
214 int i, fd;
215 size_t fd_num = 0;
217 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
219 /* only translate vhost ioctl requests */
220 if (request > VHOST_USER_MAX) {
221 msg_request = vhost_user_request_translate(request);
222 } else {
223 msg_request = request;
227 * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
228 * we just need send it once in the first time. For later such
229 * request, we just ignore it.
231 if (vhost_user_one_time_request(msg_request) && dev->vq_index != 0) {
232 return 0;
235 msg.request = msg_request;
236 msg.flags = VHOST_USER_VERSION;
237 msg.size = 0;
239 switch (msg_request) {
240 case VHOST_USER_GET_FEATURES:
241 case VHOST_USER_GET_PROTOCOL_FEATURES:
242 case VHOST_USER_GET_QUEUE_NUM:
243 need_reply = 1;
244 break;
246 case VHOST_USER_SET_FEATURES:
247 case VHOST_USER_SET_PROTOCOL_FEATURES:
248 msg.u64 = *((__u64 *) arg);
249 msg.size = sizeof(m.u64);
250 break;
252 case VHOST_USER_SET_OWNER:
253 case VHOST_USER_RESET_DEVICE:
254 break;
256 case VHOST_USER_SET_MEM_TABLE:
257 for (i = 0; i < dev->mem->nregions; ++i) {
258 struct vhost_memory_region *reg = dev->mem->regions + i;
259 ram_addr_t ram_addr;
261 assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
262 qemu_ram_addr_from_host((void *)(uintptr_t)reg->userspace_addr, &ram_addr);
263 fd = qemu_get_ram_fd(ram_addr);
264 if (fd > 0) {
265 msg.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
266 msg.memory.regions[fd_num].memory_size = reg->memory_size;
267 msg.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
268 msg.memory.regions[fd_num].mmap_offset = reg->userspace_addr -
269 (uintptr_t) qemu_get_ram_block_host_ptr(ram_addr);
270 assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
271 fds[fd_num++] = fd;
275 msg.memory.nregions = fd_num;
277 if (!fd_num) {
278 error_report("Failed initializing vhost-user memory map, "
279 "consider using -object memory-backend-file share=on");
280 return -1;
283 msg.size = sizeof(m.memory.nregions);
284 msg.size += sizeof(m.memory.padding);
285 msg.size += fd_num * sizeof(VhostUserMemoryRegion);
287 break;
289 case VHOST_USER_SET_LOG_FD:
290 fds[fd_num++] = *((int *) arg);
291 break;
293 case VHOST_USER_SET_VRING_NUM:
294 case VHOST_USER_SET_VRING_BASE:
295 case VHOST_USER_SET_VRING_ENABLE:
296 memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
297 msg.size = sizeof(m.state);
298 break;
300 case VHOST_USER_GET_VRING_BASE:
301 memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
302 msg.size = sizeof(m.state);
303 need_reply = 1;
304 break;
306 case VHOST_USER_SET_VRING_ADDR:
307 memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
308 msg.size = sizeof(m.addr);
309 break;
311 case VHOST_USER_SET_VRING_KICK:
312 case VHOST_USER_SET_VRING_CALL:
313 case VHOST_USER_SET_VRING_ERR:
314 file = arg;
315 msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
316 msg.size = sizeof(m.u64);
317 if (ioeventfd_enabled() && file->fd > 0) {
318 fds[fd_num++] = file->fd;
319 } else {
320 msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
322 break;
323 default:
324 error_report("vhost-user trying to send unhandled ioctl");
325 return -1;
326 break;
329 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
330 return 0;
333 if (need_reply) {
334 if (vhost_user_read(dev, &msg) < 0) {
335 return 0;
338 if (msg_request != msg.request) {
339 error_report("Received unexpected msg type."
340 " Expected %d received %d", msg_request, msg.request);
341 return -1;
344 switch (msg_request) {
345 case VHOST_USER_GET_FEATURES:
346 case VHOST_USER_GET_PROTOCOL_FEATURES:
347 case VHOST_USER_GET_QUEUE_NUM:
348 if (msg.size != sizeof(m.u64)) {
349 error_report("Received bad msg size.");
350 return -1;
352 *((__u64 *) arg) = msg.u64;
353 break;
354 case VHOST_USER_GET_VRING_BASE:
355 if (msg.size != sizeof(m.state)) {
356 error_report("Received bad msg size.");
357 return -1;
359 memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
360 break;
361 default:
362 error_report("Received unexpected msg type.");
363 return -1;
364 break;
368 return 0;
371 static int vhost_set_log_base(struct vhost_dev *dev, uint64_t base,
372 struct vhost_log *log)
374 int fds[VHOST_MEMORY_MAX_NREGIONS];
375 size_t fd_num = 0;
376 bool shmfd = virtio_has_feature(dev->protocol_features,
377 VHOST_USER_PROTOCOL_F_LOG_SHMFD);
378 VhostUserMsg msg = {
379 .request = VHOST_USER_SET_LOG_BASE,
380 .flags = VHOST_USER_VERSION,
381 .u64 = base,
382 .size = sizeof(m.u64),
385 if (shmfd && log->fd != -1) {
386 fds[fd_num++] = log->fd;
389 vhost_user_write(dev, &msg, fds, fd_num);
391 if (shmfd) {
392 msg.size = 0;
393 if (vhost_user_read(dev, &msg) < 0) {
394 return 0;
397 if (msg.request != VHOST_USER_SET_LOG_BASE) {
398 error_report("Received unexpected msg type. "
399 "Expected %d received %d",
400 VHOST_USER_SET_LOG_BASE, msg.request);
401 return -1;
405 return 0;
/*
 * Backend init: store the chardev handle and negotiate features with the
 * slave.  The sequence is order-dependent: base features first, then
 * (only if VHOST_USER_F_PROTOCOL_FEATURES is offered) protocol features,
 * then the max queue count.  Finally, register a migration blocker when
 * the slave cannot do dirty logging via a shared mmap fd.
 *
 * Returns 0 on success, a negative error from vhost_user_call otherwise.
 */
static int vhost_user_init(struct vhost_dev *dev, void *opaque)
{
    unsigned long long features;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* opaque is the CharDriverState connected to the slave */
    dev->opaque = opaque;

    err = vhost_user_call(dev, VHOST_USER_GET_FEATURES, &features);
    if (err < 0) {
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_call(dev, VHOST_USER_GET_PROTOCOL_FEATURES, &features);
        if (err < 0) {
            return err;
        }

        /* only keep the protocol feature bits we actually implement */
        dev->protocol_features = features & VHOST_USER_PROTOCOL_FEATURE_MASK;
        err = vhost_user_call(dev, VHOST_USER_SET_PROTOCOL_FEATURES,
                              &dev->protocol_features);
        if (err < 0) {
            return err;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_call(dev, VHOST_USER_GET_QUEUE_NUM,
                                  &dev->max_queues);
            if (err < 0) {
                return err;
            }
        }
    }

    /* without shared-fd dirty logging the device cannot be migrated */
    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    return 0;
}
457 static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
459 struct vhost_vring_state state = {
460 .index = dev->vq_index,
461 .num = enable,
464 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
466 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ))) {
467 return -1;
470 return vhost_user_call(dev, VHOST_USER_SET_VRING_ENABLE, &state);
473 static int vhost_user_cleanup(struct vhost_dev *dev)
475 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
477 dev->opaque = 0;
479 return 0;
482 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
484 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
486 return idx;
/* Max RAM regions we can describe in one SET_MEM_TABLE message. */
static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    return VHOST_MEMORY_MAX_NREGIONS;
}
494 static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
496 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
498 return virtio_has_feature(dev->protocol_features,
499 VHOST_USER_PROTOCOL_F_LOG_SHMFD);
/* Backend ops table registered for VHOST_BACKEND_TYPE_USER devices. */
const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_call = vhost_user_call,
        .vhost_backend_init = vhost_user_init,
        .vhost_backend_cleanup = vhost_user_cleanup,
        .vhost_backend_get_vq_index = vhost_user_get_vq_index,
        .vhost_backend_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_set_log_base,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
};