/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

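/* A vhost backend can only handle a bounded number of memory slots.  The
 * global device list lets us take the tightest limit across all registered
 * backends and compare it against the slots currently in use. */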
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

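/* The kernel logs dirty pages as set bits: each bit covers VHOST_LOG_PAGE
 * bytes of guest memory, and bits are grouped into vhost_log_chunk_t words.
 * Syncing means scanning the chunks that intersect both the section and the
 * requested range, and forwarding each set bit to QEMU's own dirty bitmap
 * via memory_region_set_dirty(). */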
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;

            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

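/* vhost_dev_unassign_memory() below distinguishes four ways the removed
 * range [start_addr, start_addr + size) can intersect an existing region:
 *
 *   remove whole:  removed range covers the region      -> drop the region
 *   shrink:        removed range covers the region tail -> cut the end
 *   shift:         removed range covers the region head -> cut the start
 *   split:         removed range sits in the middle     -> two regions
 */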
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;

    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

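/* The log needs one bit for every page of guest memory that vhost may write
 * to, so its size is driven by the highest guest-physical address in the
 * memory table, and by the used rings, which vhost also writes to. */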
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size)
{
    struct vhost_log *log = g_malloc0(sizeof *log + size * sizeof(*(log->log)));

    log->size = size;
    log->refcnt = 1;

    return log;
}

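/* All devices share the global vhost_log as long as they agree on its size;
 * refcnt tracks how many devices hold it.  A size mismatch forces a fresh
 * allocation, and the old log is freed once its last user drops it in
 * vhost_log_put(). */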
static struct vhost_log *vhost_log_get(uint64_t size)
{
    if (!vhost_log || vhost_log->size != size) {
        vhost_log = vhost_log_alloc(size);
    } else {
        ++vhost_log->refcnt;
    }

    return vhost_log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }
        if (vhost_log == log) {
            vhost_log = NULL;
        }
        g_free(log);
    }
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size);
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

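/* vhost keeps host-virtual pointers to the vrings, so a memory update must
 * not move them: the rings have to remain mapped at the same address and
 * with the same size, or the device cannot safely keep running. */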
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

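/* Turning logging on or off means renegotiating VHOST_F_LOG_ALL and
 * reprogramming every vring's addresses with the log flag.  On failure,
 * roll the already-updated vrings back to the current dev->log_enabled
 * state so the device is left consistent. */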
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

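/* The memory API invokes these callbacks when global dirty logging is
 * switched on or off, which is how migration starts and stops write
 * tracking for vhost devices.  There is no way to report failure to the
 * caller, hence the abort(). */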
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ENDIAN, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        virtio_legacy_is_cross_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

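/* Stopping reads the last available index back from the kernel via
 * VHOST_GET_VRING_BASE and stores it in the virtio core, so processing can
 * resume from the same point in QEMU or, after migration, in the
 * destination's vhost instance. */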
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, which is what legacy devices expect by default.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        virtio_legacy_is_cross_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        close((uintptr_t)opaque);
        return -1;
    }
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->migration_blocker = NULL;
    if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
        error_setg(&hdev->migration_blocker,
                   "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        migrate_add_blocker(hdev->migration_blocker);
    }
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_backend_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

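/* Starting a device pushes the full state down to the backend in order:
 * negotiated features first, then the memory table, then each virtqueue,
 * and finally the dirty log base if logging is already enabled (e.g. when
 * the device is started while migration is in progress). */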
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size);
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE,
                                        hdev->log_size ? &log_base : NULL);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}