vhost: Simplify ring verification checks
[qemu/ar7.git] hw/virtio/vhost.c
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif
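
/*
 * Example (illustrative use, mirroring the call sites in this file): a
 * failing backend call such as
 *
 *     r = dev->vhost_ops->vhost_set_owner(dev);
 *     if (r < 0) {
 *         VHOST_OPS_DEBUG("vhost_set_owner failed");
 *     }
 *
 * prints "vhost_set_owner failed: <strerror> (<errno>)" when _VHOST_DEBUG
 * is defined and compiles to nothing otherwise.
 */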
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}
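
/*
 * The dirty log shared with the backend is a dense bitmap: each
 * vhost_log_chunk_t element covers VHOST_LOG_CHUNK bytes of guest memory,
 * one bit per VHOST_LOG_PAGE page.  A dirty guest address therefore maps
 * to bit (addr % VHOST_LOG_CHUNK) / VHOST_LOG_PAGE of chunk
 * log[addr / VHOST_LOG_CHUNK], which is the indexing the loop below
 * relies on when it converts set bits back into page addresses.
 */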
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
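
/*
 * Worked example (hypothetical addresses): unassigning GPA range
 * [0x2000, 0x2fff] from an existing region [0x1000, 0x3fff] falls
 * through to the split path above, leaving [0x1000, 0x1fff] in place
 * and appending [0x3000, 0x3fff] as a new region at the end of the
 * array.
 */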
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
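
/*
 * Sketch of the arithmetic (the numbers assume 64-bit chunks and 4 KiB
 * pages; the actual values come from the VHOST_LOG_CHUNK/VHOST_LOG_PAGE
 * definitions of the build): a chunk then covers 64 * 4 KiB = 256 KiB,
 * so a single region ending at GPA 0x10000000 (256 MiB) yields
 * last / VHOST_LOG_CHUNK + 1 = 1024 chunks of log.
 */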
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}
static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}
static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}
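
/*
 * Return value contract for the check below: 0 means the ring part
 * either does not overlap this region at all or is fully and identically
 * mapped; -ENOMEM means the region covers only part of the ring;
 * -EBUSY means the ring's GPA range is still covered but now translates
 * to a different HVA, i.e. its MemoryRegion was replaced underneath us.
 */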
static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}
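
/*
 * Note on the log_dirty handling above: sections with dirty-tracking
 * clients other than migration are treated as removals rather than
 * additions, presumably because backend DMA into them would bypass that
 * tracking.  Also note that used_memslots is global, so whichever device
 * updated its table last defines the count that vhost_has_free_slot()
 * compares against the backend limits.
 */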
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);
}
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    int r;
    int i;

    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (!dev->memory_changed) {
        goto out;
    }
    if (!dev->started) {
        goto out;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}
static void vhost_add_section(struct vhost_dev *dev,
                              MemoryRegionSection *section)
{
    ++dev->n_tmp_sections;
    dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                dev->n_tmp_sections);
    dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
    memory_region_ref(section->mr);
}
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    vhost_add_section(dev, section);
    vhost_set_memory(listener, section, true);
}
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    vhost_add_section(dev, section);
}
static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
}
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Fail to invalidate device iotlb");
    }
}
static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}
static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}
static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}
static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}
static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}
static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}
/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}
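
/*
 * Example: a legacy (pre-VIRTIO_F_VERSION_1) device whose guest runs
 * big-endian on a little-endian host hits the #else branch above and
 * returns true, so vhost_virtqueue_start() will call
 * vhost_virtqueue_set_vring_endian_legacy() before handing the rings to
 * the backend.
 */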
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}
static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}
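
/*
 * Slow path taken when the backend faults on an IOVA it has no
 * translation for: the IOVA is translated through the virtual IOMMU's
 * address space, the resulting GPA is turned back into a
 * backend-visible userspace address via the memory table above, and
 * the mapping is pushed to the backend, clamped to the smaller of the
 * IOTLB entry and the memory region.
 */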
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Fail to lookup the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Fail to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}
static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}
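
/*
 * Typical lifecycle for a device built on this file (a sketch; the
 * exact flow lives in callers such as hw/net/vhost_net.c, and error
 * handling is omitted):
 *
 *     struct vhost_dev dev = { .nvqs = nvqs, .vqs = vqs };
 *     vhost_dev_init(&dev, opaque, VHOST_BACKEND_TYPE_KERNEL, 0);
 *     ...
 *     vhost_dev_enable_notifiers(&dev, vdev);
 *     vhost_dev_start(&dev, vdev);
 *     ...
 *     vhost_dev_stop(&dev, vdev);
 *     vhost_dev_disable_notifiers(&dev, vdev);
 *     vhost_dev_cleanup(&dev);
 */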
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}
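
/*
 * Example (illustrative): given a terminated list such as
 *
 *     static const int feature_bits[] = {
 *         VIRTIO_F_VERSION_1,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 * vhost_get_features() strips from the device's offered features any
 * listed bit the backend did not report in hdev->features, and
 * vhost_ack_features() below is the mirror image, latching the
 * guest-acked subset into hdev->acked_features.
 */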
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}
int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}
void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    assert(hdev->vhost_ops);
    hdev->config_ops = ops;
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}
int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}