/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}
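
/* Sync the dirty log for the part of [mfirst, mlast] that overlaps
 * [rfirst, rlast]: walk the log chunk by chunk and propagate every page
 * the backend flagged as dirty into QEMU's own dirty bitmap. */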
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
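
/* MemoryListener callback: sync dirty pages for an entire section. */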
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
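
/* Allocate a new dirty log. When the log is shared with the backend
 * (vhost-user), back it with a sealed memfd so the backend can map the
 * same pages; otherwise plain heap memory suffices. */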
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }
}
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}
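
/* Check that any ring overlapping a changed memory range still maps to
 * the same host address: vhost cannot cope with a ring relocating under
 * its feet while the device is running. */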
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
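
/* Fold a memory section into dev->mem, adding or removing its range,
 * and widen the changed-address window that vhost_commit() will flush
 * to the backend. */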
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
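
/* Push the memory-map changes accumulated since vhost_begin() to the
 * backend. The log is grown before the table update and shrunk only
 * after it, so no dirty page can ever fall outside the log. */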
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}
static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
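
/* Push the acked feature set to the backend, optionally adding
 * VHOST_F_LOG_ALL on top to enable dirty logging. */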
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
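
/* Enable or disable dirty logging on a possibly-running device, called
 * when migration starts or stops. On partial failure, the rollback path
 * in vhost_dev_set_log() restores the previous state. */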
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}
/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef TARGET_IS_BIENDIAN
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
#else
    return false;
#endif
}
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}
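
/* Set up one virtqueue in the backend: program its size, base index and
 * endianness, map the descriptor, avail, used and ring areas into QEMU,
 * hand their addresses to vhost, and wire up the kick eventfd. */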
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
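
/* Tear down one virtqueue: fetch the last avail index back from the
 * backend so virtio can resume where vhost left off, then unmap the
 * ring areas, marking the used ring as written. */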
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert (r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
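
/* Device lifecycle, as driven by a vhost backend such as vhost-net
 * (sketch, error handling omitted):
 *
 *     vhost_dev_init(hdev, opaque, backend_type);
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev);
 *     ...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */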
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        close((uintptr_t)opaque);
        return -1;
    }
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            /* Report the cleanup error itself, not the original failure. */
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert (e >= 0);
    }
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert (r >= 0);
    }
}
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}