hw/virtio/vhost.c
1 /*
2 * vhost support
4 * Copyright Red Hat, Inc. 2010
6 * Authors:
7 * Michael S. Tsirkin <mst@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "qemu/atomic.h"
20 #include "qemu/range.h"
21 #include "qemu/error-report.h"
22 #include "qemu/memfd.h"
23 #include "standard-headers/linux/vhost_types.h"
24 #include "exec/address-spaces.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/virtio/virtio-access.h"
27 #include "migration/blocker.h"
28 #include "migration/qemu-file-types.h"
29 #include "sysemu/dma.h"
30 #include "sysemu/tcg.h"
31 #include "trace.h"
33 /* enabled until disconnected backend stabilizes */
34 #define _VHOST_DEBUG 1
36 #ifdef _VHOST_DEBUG
37 #define VHOST_OPS_DEBUG(fmt, ...) \
38 do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
39 strerror(errno), errno); } while (0)
40 #else
41 #define VHOST_OPS_DEBUG(fmt, ...) \
42 do { } while (0)
43 #endif
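/*
 * Illustration of how the macro above is used at backend call sites in this
 * file: a failed vhost operation is reported together with errno, e.g.
 *
 *     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
 *     if (r) {
 *         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
 *     }
 *
 * With _VHOST_DEBUG defined this expands to
 * error_report("vhost_set_vring_num failed: %s (%d)", strerror(errno), errno);
 * otherwise it compiles to an empty statement.
 */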
45 static struct vhost_log *vhost_log;
46 static struct vhost_log *vhost_log_shm;
48 static unsigned int used_memslots;
49 static QLIST_HEAD(, vhost_dev) vhost_devices =
50 QLIST_HEAD_INITIALIZER(vhost_devices);
52 bool vhost_has_free_slot(void)
54 unsigned int slots_limit = ~0U;
55 struct vhost_dev *hdev;
57 QLIST_FOREACH(hdev, &vhost_devices, entry) {
58 unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
59 slots_limit = MIN(slots_limit, r);
61     return slots_limit > used_memslots;
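/*
 * Dirty log layout used by the sync code below: the log is an array of
 * vhost_log_chunk_t words, and each set bit marks one VHOST_LOG_PAGE sized
 * page as dirty.  For a guest physical address 'gpa' this means, roughly:
 *
 *     chunk = gpa / VHOST_LOG_CHUNK;
 *     bit   = (gpa % VHOST_LOG_CHUNK) / VHOST_LOG_PAGE;
 *
 * vhost_dev_sync_region() atomically swaps each non-zero chunk with 0 and
 * forwards every set bit to memory_region_set_dirty().
 */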
64 static void vhost_dev_sync_region(struct vhost_dev *dev,
65 MemoryRegionSection *section,
66 uint64_t mfirst, uint64_t mlast,
67 uint64_t rfirst, uint64_t rlast)
69 vhost_log_chunk_t *log = dev->log->log;
71 uint64_t start = MAX(mfirst, rfirst);
72 uint64_t end = MIN(mlast, rlast);
73 vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
74 vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
75 uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
77 if (end < start) {
78 return;
80 assert(end / VHOST_LOG_CHUNK < dev->log_size);
81 assert(start / VHOST_LOG_CHUNK < dev->log_size);
83 for (;from < to; ++from) {
84 vhost_log_chunk_t log;
85 /* We first check with non-atomic: much cheaper,
86 * and we expect non-dirty to be the common case. */
87 if (!*from) {
88 addr += VHOST_LOG_CHUNK;
89 continue;
91 /* Data must be read atomically. We don't really need barrier semantics
92 * but it's easier to use atomic_* than roll our own. */
93 log = qatomic_xchg(from, 0);
94 while (log) {
95 int bit = ctzl(log);
96 hwaddr page_addr;
97 hwaddr section_offset;
98 hwaddr mr_offset;
99 page_addr = addr + bit * VHOST_LOG_PAGE;
100 section_offset = page_addr - section->offset_within_address_space;
101 mr_offset = section_offset + section->offset_within_region;
102 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
103 log &= ~(0x1ull << bit);
105 addr += VHOST_LOG_CHUNK;
109 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
110 MemoryRegionSection *section,
111 hwaddr first,
112 hwaddr last)
114 int i;
115 hwaddr start_addr;
116 hwaddr end_addr;
118 if (!dev->log_enabled || !dev->started) {
119 return 0;
121 start_addr = section->offset_within_address_space;
122 end_addr = range_get_last(start_addr, int128_get64(section->size));
123 start_addr = MAX(first, start_addr);
124 end_addr = MIN(last, end_addr);
126 for (i = 0; i < dev->mem->nregions; ++i) {
127 struct vhost_memory_region *reg = dev->mem->regions + i;
128 vhost_dev_sync_region(dev, section, start_addr, end_addr,
129 reg->guest_phys_addr,
130 range_get_last(reg->guest_phys_addr,
131 reg->memory_size));
133 for (i = 0; i < dev->nvqs; ++i) {
134 struct vhost_virtqueue *vq = dev->vqs + i;
136 if (!vq->used_phys && !vq->used_size) {
137 continue;
140 vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
141 range_get_last(vq->used_phys, vq->used_size));
143 return 0;
146 static void vhost_log_sync(MemoryListener *listener,
147 MemoryRegionSection *section)
149 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
150 memory_listener);
151 vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
154 static void vhost_log_sync_range(struct vhost_dev *dev,
155 hwaddr first, hwaddr last)
157 int i;
158 /* FIXME: this is N^2 in number of sections */
159 for (i = 0; i < dev->n_mem_sections; ++i) {
160 MemoryRegionSection *section = &dev->mem_sections[i];
161 vhost_sync_dirty_bitmap(dev, section, first, last);
165 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
167 uint64_t log_size = 0;
168 int i;
169 for (i = 0; i < dev->mem->nregions; ++i) {
170 struct vhost_memory_region *reg = dev->mem->regions + i;
171 uint64_t last = range_get_last(reg->guest_phys_addr,
172 reg->memory_size);
173 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
175 return log_size;
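/*
 * Worked example for the size computation above (numbers purely
 * illustrative): a single 4 GiB region starting at GPA 0 has its last byte
 * at 0xffffffff, so the log needs 0xffffffff / VHOST_LOG_CHUNK + 1 chunks
 * to be able to mark the region's final page dirty.
 */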
178 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
180 Error *err = NULL;
181 struct vhost_log *log;
182 uint64_t logsize = size * sizeof(*(log->log));
183 int fd = -1;
185 log = g_new0(struct vhost_log, 1);
186 if (share) {
187 log->log = qemu_memfd_alloc("vhost-log", logsize,
188 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
189 &fd, &err);
190 if (err) {
191 error_report_err(err);
192 g_free(log);
193 return NULL;
195 memset(log->log, 0, logsize);
196 } else {
197 log->log = g_malloc0(logsize);
200 log->size = size;
201 log->refcnt = 1;
202 log->fd = fd;
204 return log;
207 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
209 struct vhost_log *log = share ? vhost_log_shm : vhost_log;
211 if (!log || log->size != size) {
212 log = vhost_log_alloc(size, share);
213 if (share) {
214 vhost_log_shm = log;
215 } else {
216 vhost_log = log;
218 } else {
219 ++log->refcnt;
222 return log;
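/*
 * Note on log sharing (describing the two caches above): vhost_log and
 * vhost_log_shm each hold at most one refcounted log.  A device asking for
 * the same size reuses the cached log and just bumps refcnt; a different
 * size allocates a fresh log and makes it the new cache entry.  The shm
 * variant is backed by a memfd so that backends reporting
 * vhost_requires_shm_log() (typically vhost-user) can map it directly.
 */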
225 static void vhost_log_put(struct vhost_dev *dev, bool sync)
227 struct vhost_log *log = dev->log;
229 if (!log) {
230 return;
233 --log->refcnt;
234 if (log->refcnt == 0) {
235 /* Sync only the range covered by the old log */
236 if (dev->log_size && sync) {
237 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
240 if (vhost_log == log) {
241 g_free(log->log);
242 vhost_log = NULL;
243 } else if (vhost_log_shm == log) {
244 qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
245 log->fd);
246 vhost_log_shm = NULL;
249 g_free(log);
252 dev->log = NULL;
253 dev->log_size = 0;
256 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
258 return dev->vhost_ops->vhost_requires_shm_log &&
259 dev->vhost_ops->vhost_requires_shm_log(dev);
262 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
264 struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
265 uint64_t log_base = (uintptr_t)log->log;
266 int r;
268     /* Inform the backend of log switching; this must be done before
269        releasing the current log, to ensure no logging is lost. */
270 r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
271 if (r < 0) {
272 VHOST_OPS_DEBUG("vhost_set_log_base failed");
275 vhost_log_put(dev, true);
276 dev->log = log;
277 dev->log_size = size;
280 static int vhost_dev_has_iommu(struct vhost_dev *dev)
282 VirtIODevice *vdev = dev->vdev;
285      * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports the
286      * incremental memory mapping API via the IOTLB API. For platforms that
287      * do not have an IOMMU, there's no need to enable this feature, which
288      * may cause unnecessary IOTLB miss/update transactions.
290 return vdev->dma_as != &address_space_memory &&
291 virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
294 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
295 hwaddr *plen, bool is_write)
297 if (!vhost_dev_has_iommu(dev)) {
298 return cpu_physical_memory_map(addr, plen, is_write);
299 } else {
300 return (void *)(uintptr_t)addr;
304 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
305 hwaddr len, int is_write,
306 hwaddr access_len)
308 if (!vhost_dev_has_iommu(dev)) {
309 cpu_physical_memory_unmap(buffer, len, is_write, access_len);
313 static int vhost_verify_ring_part_mapping(void *ring_hva,
314 uint64_t ring_gpa,
315 uint64_t ring_size,
316 void *reg_hva,
317 uint64_t reg_gpa,
318 uint64_t reg_size)
320 uint64_t hva_ring_offset;
321 uint64_t ring_last = range_get_last(ring_gpa, ring_size);
322 uint64_t reg_last = range_get_last(reg_gpa, reg_size);
324 if (ring_last < reg_gpa || ring_gpa > reg_last) {
325 return 0;
327     /* check that the whole ring is mapped */
328 if (ring_last > reg_last) {
329 return -ENOMEM;
331 /* check that ring's MemoryRegion wasn't replaced */
332 hva_ring_offset = ring_gpa - reg_gpa;
333 if (ring_hva != reg_hva + hva_ring_offset) {
334 return -EBUSY;
337 return 0;
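/*
 * Return value convention for the helper above: 0 means the ring part
 * either does not intersect the region at all or is mapped there
 * consistently; -ENOMEM means the ring extends past the end of the region;
 * -EBUSY means the region moved in host memory, i.e. the ring's HVA no
 * longer matches reg_hva + (ring_gpa - reg_gpa).
 */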
340 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
341 void *reg_hva,
342 uint64_t reg_gpa,
343 uint64_t reg_size)
345 int i, j;
346 int r = 0;
347 const char *part_name[] = {
348 "descriptor table",
349 "available ring",
350 "used ring"
353 if (vhost_dev_has_iommu(dev)) {
354 return 0;
357 for (i = 0; i < dev->nvqs; ++i) {
358 struct vhost_virtqueue *vq = dev->vqs + i;
360 if (vq->desc_phys == 0) {
361 continue;
364 j = 0;
365 r = vhost_verify_ring_part_mapping(
366 vq->desc, vq->desc_phys, vq->desc_size,
367 reg_hva, reg_gpa, reg_size);
368 if (r) {
369 break;
372 j++;
373 r = vhost_verify_ring_part_mapping(
374 vq->avail, vq->avail_phys, vq->avail_size,
375 reg_hva, reg_gpa, reg_size);
376 if (r) {
377 break;
380 j++;
381 r = vhost_verify_ring_part_mapping(
382 vq->used, vq->used_phys, vq->used_size,
383 reg_hva, reg_gpa, reg_size);
384 if (r) {
385 break;
389 if (r == -ENOMEM) {
390 error_report("Unable to map %s for ring %d", part_name[j], i);
391 } else if (r == -EBUSY) {
392 error_report("%s relocated for ring %d", part_name[j], i);
394 return r;
398 * vhost_section: identify sections needed for vhost access
400 * We only care about RAM sections here (where virtqueue and guest
401 * internals accessed by virtio might live). If we find one we still
402 * allow the backend to potentially filter it out of our list.
404 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
406 MemoryRegion *mr = section->mr;
408 if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
409 uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
410 uint8_t handled_dirty;
413          * Kernel-based vhost doesn't handle any block that is doing
414          * dirty-tracking other than migration, for which it has
415          * specific logging support. For TCG the kernel never gets
416          * involved anyway, so we can also ignore its self-modifying
417          * code detection flags. However, a vhost-user client could
418          * still confuse a TCG guest if it re-writes executable memory
419          * that has already been translated.
421 handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
422 (1 << DIRTY_MEMORY_CODE);
424 if (dirty_mask & ~handled_dirty) {
425 trace_vhost_reject_section(mr->name, 1);
426 return false;
429 if (dev->vhost_ops->vhost_backend_mem_section_filter &&
430 !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
431 trace_vhost_reject_section(mr->name, 2);
432 return false;
435 trace_vhost_section(mr->name);
436 return true;
437 } else {
438 trace_vhost_reject_section(mr->name, 3);
439 return false;
443 static void vhost_begin(MemoryListener *listener)
445 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
446 memory_listener);
447 dev->tmp_sections = NULL;
448 dev->n_tmp_sections = 0;
451 static void vhost_commit(MemoryListener *listener)
453 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
454 memory_listener);
455 MemoryRegionSection *old_sections;
456 int n_old_sections;
457 uint64_t log_size;
458 size_t regions_size;
459 int r;
460 int i;
461 bool changed = false;
463 /* Note we can be called before the device is started, but then
464 * starting the device calls set_mem_table, so we need to have
465 * built the data structures.
467 old_sections = dev->mem_sections;
468 n_old_sections = dev->n_mem_sections;
469 dev->mem_sections = dev->tmp_sections;
470 dev->n_mem_sections = dev->n_tmp_sections;
472 if (dev->n_mem_sections != n_old_sections) {
473 changed = true;
474 } else {
475         /* Same size, let's check the contents */
476 for (int i = 0; i < n_old_sections; i++) {
477 if (!MemoryRegionSection_eq(&old_sections[i],
478 &dev->mem_sections[i])) {
479 changed = true;
480 break;
485 trace_vhost_commit(dev->started, changed);
486 if (!changed) {
487 goto out;
490 /* Rebuild the regions list from the new sections list */
491 regions_size = offsetof(struct vhost_memory, regions) +
492 dev->n_mem_sections * sizeof dev->mem->regions[0];
493 dev->mem = g_realloc(dev->mem, regions_size);
494 dev->mem->nregions = dev->n_mem_sections;
495 used_memslots = dev->mem->nregions;
496 for (i = 0; i < dev->n_mem_sections; i++) {
497 struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
498 struct MemoryRegionSection *mrs = dev->mem_sections + i;
500 cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
501 cur_vmr->memory_size = int128_get64(mrs->size);
502 cur_vmr->userspace_addr =
503 (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
504 mrs->offset_within_region;
505 cur_vmr->flags_padding = 0;
508 if (!dev->started) {
509 goto out;
512 for (i = 0; i < dev->mem->nregions; i++) {
513 if (vhost_verify_ring_mappings(dev,
514 (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
515 dev->mem->regions[i].guest_phys_addr,
516 dev->mem->regions[i].memory_size)) {
517 error_report("Verify ring failure on region %d", i);
518 abort();
522 if (!dev->log_enabled) {
523 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
524 if (r < 0) {
525 VHOST_OPS_DEBUG("vhost_set_mem_table failed");
527 goto out;
529 log_size = vhost_get_log_size(dev);
530     /* We allocate an extra 4K bytes of log space,
531      * to reduce the number of reallocations. */
532 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
533 /* To log more, must increase log size before table update. */
534 if (dev->log_size < log_size) {
535 vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
537 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
538 if (r < 0) {
539 VHOST_OPS_DEBUG("vhost_set_mem_table failed");
541 /* To log less, can only decrease log size after table update. */
542 if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
543 vhost_dev_log_resize(dev, log_size);
546 out:
547     /* Deref the old list of sections; this must happen _after_ the
548 * vhost_set_mem_table to ensure the client isn't still using the
549 * section we're about to unref.
551 while (n_old_sections--) {
552 memory_region_unref(old_sections[n_old_sections].mr);
554 g_free(old_sections);
555 return;
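/*
 * How the listener callbacks above and below fit together: on every memory
 * transaction QEMU calls vhost_begin(), then vhost_region_addnop() for each
 * section in address order (non-RAM sections are filtered out by
 * vhost_section()), then vhost_commit().  begin resets tmp_sections, addnop
 * appends/merges sections into it, and commit swaps tmp_sections in as
 * mem_sections, rebuilds dev->mem and, if the device is running, pushes the
 * new table to the backend with vhost_set_mem_table (growing the dirty log
 * first when it has to).
 */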
558 /* Adds the section data to the tmp_sections array.
559  * It relies on the listener calling us in memory address order,
560  * via the _add and _nop methods for each region, so that
561  * neighbouring sections can be joined.
563 static void vhost_region_add_section(struct vhost_dev *dev,
564 MemoryRegionSection *section)
566 bool need_add = true;
567 uint64_t mrs_size = int128_get64(section->size);
568 uint64_t mrs_gpa = section->offset_within_address_space;
569 uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
570 section->offset_within_region;
571 RAMBlock *mrs_rb = section->mr->ram_block;
573 trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
574 mrs_host);
576 if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
577         /* Round the section to its page size */
578 /* First align the start down to a page boundary */
579 size_t mrs_page = qemu_ram_pagesize(mrs_rb);
580 uint64_t alignage = mrs_host & (mrs_page - 1);
581 if (alignage) {
582 mrs_host -= alignage;
583 mrs_size += alignage;
584 mrs_gpa -= alignage;
586 /* Now align the size up to a page boundary */
587 alignage = mrs_size & (mrs_page - 1);
588 if (alignage) {
589 mrs_size += mrs_page - alignage;
591 trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
592 mrs_size, mrs_host);
595 if (dev->n_tmp_sections) {
596         /* Since we already have at least one section, let's see if
597 * this extends it; since we're scanning in order, we only
598 * have to look at the last one, and the FlatView that calls
599 * us shouldn't have overlaps.
601 MemoryRegionSection *prev_sec = dev->tmp_sections +
602 (dev->n_tmp_sections - 1);
603 uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
604 uint64_t prev_size = int128_get64(prev_sec->size);
605 uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
606 uint64_t prev_host_start =
607 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
608 prev_sec->offset_within_region;
609 uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);
611 if (mrs_gpa <= (prev_gpa_end + 1)) {
612 /* OK, looks like overlapping/intersecting - it's possible that
613 * the rounding to page sizes has made them overlap, but they should
614 * match up in the same RAMBlock if they do.
616 if (mrs_gpa < prev_gpa_start) {
617 error_report("%s:Section '%s' rounded to %"PRIx64
618 " prior to previous '%s' %"PRIx64,
619 __func__, section->mr->name, mrs_gpa,
620 prev_sec->mr->name, prev_gpa_start);
621 /* A way to cleanly fail here would be better */
622 return;
624 /* Offset from the start of the previous GPA to this GPA */
625 size_t offset = mrs_gpa - prev_gpa_start;
627 if (prev_host_start + offset == mrs_host &&
628 section->mr == prev_sec->mr &&
629 (!dev->vhost_ops->vhost_backend_can_merge ||
630 dev->vhost_ops->vhost_backend_can_merge(dev,
631 mrs_host, mrs_size,
632 prev_host_start, prev_size))) {
633 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
634 need_add = false;
635 prev_sec->offset_within_address_space =
636 MIN(prev_gpa_start, mrs_gpa);
637 prev_sec->offset_within_region =
638 MIN(prev_host_start, mrs_host) -
639 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
640 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
641 mrs_host));
642 trace_vhost_region_add_section_merge(section->mr->name,
643 int128_get64(prev_sec->size),
644 prev_sec->offset_within_address_space,
645 prev_sec->offset_within_region);
646 } else {
647 /* adjoining regions are fine, but overlapping ones with
648 * different blocks/offsets shouldn't happen
650 if (mrs_gpa != prev_gpa_end + 1) {
651 error_report("%s: Overlapping but not coherent sections "
652 "at %"PRIx64,
653 __func__, mrs_gpa);
654 return;
660 if (need_add) {
661 ++dev->n_tmp_sections;
662 dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
663 dev->n_tmp_sections);
664 dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
665         /* The flatview isn't stable and we don't use it; making it NULL
666 * means we can memcmp the list.
668 dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
669 memory_region_ref(section->mr);
673 /* Used for both add and nop callbacks */
674 static void vhost_region_addnop(MemoryListener *listener,
675 MemoryRegionSection *section)
677 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
678 memory_listener);
680 if (!vhost_section(dev, section)) {
681 return;
683 vhost_region_add_section(dev, section);
686 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
688 struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
689 struct vhost_dev *hdev = iommu->hdev;
690 hwaddr iova = iotlb->iova + iommu->iommu_offset;
692 if (vhost_backend_invalidate_device_iotlb(hdev, iova,
693 iotlb->addr_mask + 1)) {
694         error_report("Failed to invalidate device iotlb");
698 static void vhost_iommu_region_add(MemoryListener *listener,
699 MemoryRegionSection *section)
701 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
702 iommu_listener);
703 struct vhost_iommu *iommu;
704 Int128 end;
705 int iommu_idx;
706 IOMMUMemoryRegion *iommu_mr;
708 if (!memory_region_is_iommu(section->mr)) {
709 return;
712 iommu_mr = IOMMU_MEMORY_REGION(section->mr);
714 iommu = g_malloc0(sizeof(*iommu));
715 end = int128_add(int128_make64(section->offset_within_region),
716 section->size);
717 end = int128_sub(end, int128_one());
718 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
719 MEMTXATTRS_UNSPECIFIED);
720 iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
721 IOMMU_NOTIFIER_DEVIOTLB_UNMAP,
722 section->offset_within_region,
723 int128_get64(end),
724 iommu_idx);
725 iommu->mr = section->mr;
726 iommu->iommu_offset = section->offset_within_address_space -
727 section->offset_within_region;
728 iommu->hdev = dev;
729 memory_region_register_iommu_notifier(section->mr, &iommu->n,
730 &error_fatal);
731 QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
732 /* TODO: can replay help performance here? */
735 static void vhost_iommu_region_del(MemoryListener *listener,
736 MemoryRegionSection *section)
738 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
739 iommu_listener);
740 struct vhost_iommu *iommu;
742 if (!memory_region_is_iommu(section->mr)) {
743 return;
746 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
747 if (iommu->mr == section->mr &&
748 iommu->n.start == section->offset_within_region) {
749 memory_region_unregister_iommu_notifier(iommu->mr,
750 &iommu->n);
751 QLIST_REMOVE(iommu, iommu_next);
752 g_free(iommu);
753 break;
758 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
759 struct vhost_virtqueue *vq,
760 unsigned idx, bool enable_log)
762 struct vhost_vring_addr addr;
763 int r;
764 memset(&addr, 0, sizeof(struct vhost_vring_addr));
766 if (dev->vhost_ops->vhost_vq_get_addr) {
767 r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
768 if (r < 0) {
769 VHOST_OPS_DEBUG("vhost_vq_get_addr failed");
770 return -errno;
772 } else {
773 addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
774 addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
775 addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
777 addr.index = idx;
778 addr.log_guest_addr = vq->used_phys;
779 addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
780 r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
781 if (r < 0) {
782 VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
783 return -errno;
785 return 0;
788 static int vhost_dev_set_features(struct vhost_dev *dev,
789 bool enable_log)
791 uint64_t features = dev->acked_features;
792 int r;
793 if (enable_log) {
794 features |= 0x1ULL << VHOST_F_LOG_ALL;
796 if (!vhost_dev_has_iommu(dev)) {
797 features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
799 if (dev->vhost_ops->vhost_force_iommu) {
800 if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
801 features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
804 r = dev->vhost_ops->vhost_set_features(dev, features);
805 if (r < 0) {
806 VHOST_OPS_DEBUG("vhost_set_features failed");
807 goto out;
809 if (dev->vhost_ops->vhost_set_backend_cap) {
810 r = dev->vhost_ops->vhost_set_backend_cap(dev);
811 if (r < 0) {
812 VHOST_OPS_DEBUG("vhost_set_backend_cap failed");
813 goto out;
817 out:
818 return r < 0 ? -errno : 0;
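/*
 * vhost_dev_set_log() below toggles dirty logging at runtime: it first
 * renegotiates features with or without VHOST_F_LOG_ALL, then re-programs
 * the addresses of every ready virtqueue so that the VHOST_VRING_F_LOG flag
 * (and log_guest_addr) match the new setting.  On failure it re-programs
 * the rings it already touched with the previous setting and restores the
 * previous feature set.
 */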
821 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
823 int r, i, idx;
824 hwaddr addr;
826 r = vhost_dev_set_features(dev, enable_log);
827 if (r < 0) {
828 goto err_features;
830 for (i = 0; i < dev->nvqs; ++i) {
831 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
832 addr = virtio_queue_get_desc_addr(dev->vdev, idx);
833 if (!addr) {
835              * The queue might not be ready for start. If this
836              * is the case, there is no reason to continue the process.
837              * Similar logic is used by the vhost_virtqueue_start()
838              * routine.
840 continue;
842 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
843 enable_log);
844 if (r < 0) {
845 goto err_vq;
848 return 0;
849 err_vq:
850 for (; i >= 0; --i) {
851 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
852 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
853 dev->log_enabled);
855 vhost_dev_set_features(dev, dev->log_enabled);
856 err_features:
857 return r;
860 static int vhost_migration_log(MemoryListener *listener, bool enable)
862 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
863 memory_listener);
864 int r;
865 if (enable == dev->log_enabled) {
866 return 0;
868 if (!dev->started) {
869 dev->log_enabled = enable;
870 return 0;
873 r = 0;
874 if (!enable) {
875 r = vhost_dev_set_log(dev, false);
876 if (r < 0) {
877 goto check_dev_state;
879 vhost_log_put(dev, false);
880 } else {
881 vhost_dev_log_resize(dev, vhost_get_log_size(dev));
882 r = vhost_dev_set_log(dev, true);
883 if (r < 0) {
884 goto check_dev_state;
888 check_dev_state:
889 dev->log_enabled = enable;
891 * vhost-user-* devices could change their state during log
892 * initialization due to disconnect. So check dev state after
893 * vhost communication.
895 if (!dev->started) {
897          * Since the device is in the stopped state, it is okay for
898 * migration. Return success.
900 r = 0;
902 if (r) {
903         /* An error occurred. */
904 dev->log_enabled = false;
907 return r;
910 static void vhost_log_global_start(MemoryListener *listener)
912 int r;
914 r = vhost_migration_log(listener, true);
915 if (r < 0) {
916 abort();
920 static void vhost_log_global_stop(MemoryListener *listener)
922 int r;
924 r = vhost_migration_log(listener, false);
925 if (r < 0) {
926 abort();
930 static void vhost_log_start(MemoryListener *listener,
931 MemoryRegionSection *section,
932 int old, int new)
934 /* FIXME: implement */
937 static void vhost_log_stop(MemoryListener *listener,
938 MemoryRegionSection *section,
939 int old, int new)
941 /* FIXME: implement */
944 /* The vhost driver natively knows how to handle the vrings of non
945 * cross-endian legacy devices and modern devices. Only legacy devices
946 * exposed to a bi-endian guest may require the vhost driver to use a
947 * specific endianness.
949 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
951 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
952 return false;
954 #ifdef HOST_WORDS_BIGENDIAN
955 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
956 #else
957 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
958 #endif
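/*
 * Concrete case for the check above (hypothetical setup): a legacy
 * (pre-VIRTIO_F_VERSION_1) device driven by a big-endian guest on a
 * little-endian host uses big-endian vrings, so the backend must be told
 * the ring endianness explicitly via
 * vhost_virtqueue_set_vring_endian_legacy().  Modern VERSION_1 devices are
 * always little-endian and never need this.
 */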
961 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
962 bool is_big_endian,
963 int vhost_vq_index)
965 struct vhost_vring_state s = {
966 .index = vhost_vq_index,
967 .num = is_big_endian
970 if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
971 return 0;
974 VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
975 if (errno == ENOTTY) {
976 error_report("vhost does not support cross-endian");
977 return -ENOSYS;
980 return -errno;
983 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
984 uint64_t gpa, uint64_t *uaddr,
985 uint64_t *len)
987 int i;
989 for (i = 0; i < hdev->mem->nregions; i++) {
990 struct vhost_memory_region *reg = hdev->mem->regions + i;
992 if (gpa >= reg->guest_phys_addr &&
993 reg->guest_phys_addr + reg->memory_size > gpa) {
994 *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
995 *len = reg->guest_phys_addr + reg->memory_size - gpa;
996 return 0;
1000 return -EFAULT;
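/*
 * IOTLB miss handling (vhost_device_iotlb_miss below): the backend asks
 * QEMU to translate an IOVA it cannot resolve.  QEMU walks the device's DMA
 * address space to obtain a GPA plus permissions, converts the GPA to the
 * backend-visible userspace address with vhost_memory_region_lookup(),
 * clamps the length to both the IOMMU entry and the memory region, and
 * pushes the result back with vhost_backend_update_device_iotlb().
 */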
1003 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
1005 IOMMUTLBEntry iotlb;
1006 uint64_t uaddr, len;
1007 int ret = -EFAULT;
1009 RCU_READ_LOCK_GUARD();
1011 trace_vhost_iotlb_miss(dev, 1);
1013 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
1014 iova, write,
1015 MEMTXATTRS_UNSPECIFIED);
1016 if (iotlb.target_as != NULL) {
1017 ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
1018 &uaddr, &len);
1019 if (ret) {
1020 trace_vhost_iotlb_miss(dev, 3);
1021             error_report("Failed to look up the translated address "
1022 "%"PRIx64, iotlb.translated_addr);
1023 goto out;
1026 len = MIN(iotlb.addr_mask + 1, len);
1027 iova = iova & ~iotlb.addr_mask;
1029 ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
1030 len, iotlb.perm);
1031 if (ret) {
1032 trace_vhost_iotlb_miss(dev, 4);
1033             error_report("Failed to update device iotlb");
1034 goto out;
1038 trace_vhost_iotlb_miss(dev, 2);
1040 out:
1041 return ret;
1044 static int vhost_virtqueue_start(struct vhost_dev *dev,
1045 struct VirtIODevice *vdev,
1046 struct vhost_virtqueue *vq,
1047 unsigned idx)
1049 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1050 VirtioBusState *vbus = VIRTIO_BUS(qbus);
1051 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1052 hwaddr s, l, a;
1053 int r;
1054 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1055 struct vhost_vring_file file = {
1056 .index = vhost_vq_index
1058 struct vhost_vring_state state = {
1059 .index = vhost_vq_index
1061 struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
1063 a = virtio_queue_get_desc_addr(vdev, idx);
1064 if (a == 0) {
1065 /* Queue might not be ready for start */
1066 return 0;
1069 vq->num = state.num = virtio_queue_get_num(vdev, idx);
1070 r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
1071 if (r) {
1072 VHOST_OPS_DEBUG("vhost_set_vring_num failed");
1073 return -errno;
1076 state.num = virtio_queue_get_last_avail_idx(vdev, idx);
1077 r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
1078 if (r) {
1079 VHOST_OPS_DEBUG("vhost_set_vring_base failed");
1080 return -errno;
1083 if (vhost_needs_vring_endian(vdev)) {
1084 r = vhost_virtqueue_set_vring_endian_legacy(dev,
1085 virtio_is_big_endian(vdev),
1086 vhost_vq_index);
1087 if (r) {
1088 return -errno;
1092 vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1093 vq->desc_phys = a;
1094 vq->desc = vhost_memory_map(dev, a, &l, false);
1095 if (!vq->desc || l != s) {
1096 r = -ENOMEM;
1097 goto fail_alloc_desc;
1099 vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1100 vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1101 vq->avail = vhost_memory_map(dev, a, &l, false);
1102 if (!vq->avail || l != s) {
1103 r = -ENOMEM;
1104 goto fail_alloc_avail;
1106 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1107 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1108 vq->used = vhost_memory_map(dev, a, &l, true);
1109 if (!vq->used || l != s) {
1110 r = -ENOMEM;
1111 goto fail_alloc_used;
1114 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1115 if (r < 0) {
1116 r = -errno;
1117 goto fail_alloc;
1120 file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1121 r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1122 if (r) {
1123 VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1124 r = -errno;
1125 goto fail_kick;
1128 /* Clear and discard previous events if any. */
1129 event_notifier_test_and_clear(&vq->masked_notifier);
1131 /* Init vring in unmasked state, unless guest_notifier_mask
1132 * will do it later.
1134 if (!vdev->use_guest_notifier_mask) {
1135 /* TODO: check and handle errors. */
1136 vhost_virtqueue_mask(dev, vdev, idx, false);
1139 if (k->query_guest_notifiers &&
1140 k->query_guest_notifiers(qbus->parent) &&
1141 virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1142 file.fd = -1;
1143 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1144 if (r) {
1145 goto fail_vector;
1149 return 0;
1151 fail_vector:
1152 fail_kick:
1153 fail_alloc:
1154 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1155 0, 0);
1156 fail_alloc_used:
1157 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1158 0, 0);
1159 fail_alloc_avail:
1160 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1161 0, 0);
1162 fail_alloc_desc:
1163 return r;
1166 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1167 struct VirtIODevice *vdev,
1168 struct vhost_virtqueue *vq,
1169 unsigned idx)
1171 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1172 struct vhost_vring_state state = {
1173 .index = vhost_vq_index,
1175 int r;
1177 if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
1178         /* Don't stop a virtqueue which might not have been started */
1179 return;
1182 r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1183 if (r < 0) {
1184 VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
1185 /* Connection to the backend is broken, so let's sync internal
1186 * last avail idx to the device used idx.
1188 virtio_queue_restore_last_avail_idx(vdev, idx);
1189 } else {
1190 virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1192 virtio_queue_invalidate_signalled_used(vdev, idx);
1193 virtio_queue_update_used_idx(vdev, idx);
1195 /* In the cross-endian case, we need to reset the vring endianness to
1196      * native, as legacy devices expect it by default.
1198 if (vhost_needs_vring_endian(vdev)) {
1199 vhost_virtqueue_set_vring_endian_legacy(dev,
1200 !virtio_is_big_endian(vdev),
1201 vhost_vq_index);
1204 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1205 1, virtio_queue_get_used_size(vdev, idx));
1206 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1207 0, virtio_queue_get_avail_size(vdev, idx));
1208 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1209 0, virtio_queue_get_desc_size(vdev, idx));
1212 static void vhost_eventfd_add(MemoryListener *listener,
1213 MemoryRegionSection *section,
1214 bool match_data, uint64_t data, EventNotifier *e)
1218 static void vhost_eventfd_del(MemoryListener *listener,
1219 MemoryRegionSection *section,
1220 bool match_data, uint64_t data, EventNotifier *e)
1224 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1225 int n, uint32_t timeout)
1227 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1228 struct vhost_vring_state state = {
1229 .index = vhost_vq_index,
1230 .num = timeout,
1232 int r;
1234 if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1235 return -EINVAL;
1238 r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1239 if (r) {
1240 VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1241 return r;
1244 return 0;
1247 static int vhost_virtqueue_init(struct vhost_dev *dev,
1248 struct vhost_virtqueue *vq, int n)
1250 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1251 struct vhost_vring_file file = {
1252 .index = vhost_vq_index,
1254 int r = event_notifier_init(&vq->masked_notifier, 0);
1255 if (r < 0) {
1256 return r;
1259 file.fd = event_notifier_get_fd(&vq->masked_notifier);
1260 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1261 if (r) {
1262 VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1263 r = -errno;
1264 goto fail_call;
1267 vq->dev = dev;
1269 return 0;
1270 fail_call:
1271 event_notifier_cleanup(&vq->masked_notifier);
1272 return r;
1275 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1277 event_notifier_cleanup(&vq->masked_notifier);
1280 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1281 VhostBackendType backend_type, uint32_t busyloop_timeout)
1283 uint64_t features;
1284 int i, r, n_initialized_vqs = 0;
1285 Error *local_err = NULL;
1287 hdev->vdev = NULL;
1288 hdev->migration_blocker = NULL;
1290 r = vhost_set_backend_type(hdev, backend_type);
1291 assert(r >= 0);
1293 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1294 if (r < 0) {
1295 goto fail;
1298 r = hdev->vhost_ops->vhost_set_owner(hdev);
1299 if (r < 0) {
1300 VHOST_OPS_DEBUG("vhost_set_owner failed");
1301 goto fail;
1304 r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1305 if (r < 0) {
1306 VHOST_OPS_DEBUG("vhost_get_features failed");
1307 goto fail;
1310 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1311 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1312 if (r < 0) {
1313 goto fail;
1317 if (busyloop_timeout) {
1318 for (i = 0; i < hdev->nvqs; ++i) {
1319 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1320 busyloop_timeout);
1321 if (r < 0) {
1322 goto fail_busyloop;
1327 hdev->features = features;
1329 hdev->memory_listener = (MemoryListener) {
1330 .begin = vhost_begin,
1331 .commit = vhost_commit,
1332 .region_add = vhost_region_addnop,
1333 .region_nop = vhost_region_addnop,
1334 .log_start = vhost_log_start,
1335 .log_stop = vhost_log_stop,
1336 .log_sync = vhost_log_sync,
1337 .log_global_start = vhost_log_global_start,
1338 .log_global_stop = vhost_log_global_stop,
1339 .eventfd_add = vhost_eventfd_add,
1340 .eventfd_del = vhost_eventfd_del,
1341 .priority = 10
1344 hdev->iommu_listener = (MemoryListener) {
1345 .region_add = vhost_iommu_region_add,
1346 .region_del = vhost_iommu_region_del,
1349 if (hdev->migration_blocker == NULL) {
1350 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1351 error_setg(&hdev->migration_blocker,
1352 "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1353 } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1354 error_setg(&hdev->migration_blocker,
1355 "Migration disabled: failed to allocate shared memory");
1359 if (hdev->migration_blocker != NULL) {
1360 r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1361 if (local_err) {
1362 error_report_err(local_err);
1363 error_free(hdev->migration_blocker);
1364 goto fail_busyloop;
1368 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1369 hdev->n_mem_sections = 0;
1370 hdev->mem_sections = NULL;
1371 hdev->log = NULL;
1372 hdev->log_size = 0;
1373 hdev->log_enabled = false;
1374 hdev->started = false;
1375 memory_listener_register(&hdev->memory_listener, &address_space_memory);
1376 QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1378 if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1379 error_report("vhost backend memory slots limit is less"
1380 " than current number of present memory slots");
1381 r = -1;
1382 if (busyloop_timeout) {
1383 goto fail_busyloop;
1384 } else {
1385 goto fail;
1389 return 0;
1391 fail_busyloop:
1392 while (--i >= 0) {
1393 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1395 fail:
1396 hdev->nvqs = n_initialized_vqs;
1397 vhost_dev_cleanup(hdev);
1398 return r;
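/*
 * Typical call sequence seen from a device model (a rough sketch only; the
 * exact ordering is up to the caller, e.g. vhost_net):
 *
 *     vhost_dev_init(hdev, opaque, backend_type, busyloop_timeout);
 *     vhost_dev_enable_notifiers(hdev, vdev);   // ioeventfds to the backend
 *     vhost_dev_start(hdev, vdev);              // notifiers must be enabled
 *     ...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */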
1401 void vhost_dev_cleanup(struct vhost_dev *hdev)
1403 int i;
1405 for (i = 0; i < hdev->nvqs; ++i) {
1406 vhost_virtqueue_cleanup(hdev->vqs + i);
1408 if (hdev->mem) {
1409 /* those are only safe after successful init */
1410 memory_listener_unregister(&hdev->memory_listener);
1411 QLIST_REMOVE(hdev, entry);
1413 if (hdev->migration_blocker) {
1414 migrate_del_blocker(hdev->migration_blocker);
1415 error_free(hdev->migration_blocker);
1417 g_free(hdev->mem);
1418 g_free(hdev->mem_sections);
1419 if (hdev->vhost_ops) {
1420 hdev->vhost_ops->vhost_backend_cleanup(hdev);
1422 assert(!hdev->log);
1424 memset(hdev, 0, sizeof(struct vhost_dev));
1427 /* Stop processing guest IO notifications in qemu.
1428 * Start processing them in vhost in kernel.
1430 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1432 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1433 int i, r, e;
1435     /* We will pass the notifiers to the kernel, so make sure that QEMU
1436 * doesn't interfere.
1438 r = virtio_device_grab_ioeventfd(vdev);
1439 if (r < 0) {
1440 error_report("binding does not support host notifiers");
1441 goto fail;
1444 for (i = 0; i < hdev->nvqs; ++i) {
1445 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1446 true);
1447 if (r < 0) {
1448 error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1449 goto fail_vq;
1453 return 0;
1454 fail_vq:
1455 while (--i >= 0) {
1456 e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1457 false);
1458 if (e < 0) {
1459             error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1461 assert (e >= 0);
1462 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1464 virtio_device_release_ioeventfd(vdev);
1465 fail:
1466 return r;
1469 /* Stop processing guest IO notifications in vhost.
1470 * Start processing them in qemu.
1471 * This might actually run the qemu handlers right away,
1472  * so virtio in qemu must be completely set up when this is called.
1474 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1476 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1477 int i, r;
1479 for (i = 0; i < hdev->nvqs; ++i) {
1480 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1481 false);
1482 if (r < 0) {
1483 error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1485 assert (r >= 0);
1486 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1488 virtio_device_release_ioeventfd(vdev);
1491 /* Test and clear event pending status.
1492 * Should be called after unmask to avoid losing events.
1494 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1496 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1497 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1498 return event_notifier_test_and_clear(&vq->masked_notifier);
1501 /* Mask/unmask events from this vq. */
1502 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1503 bool mask)
1505 struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1506 int r, index = n - hdev->vq_index;
1507 struct vhost_vring_file file;
1509 /* should only be called after backend is connected */
1510 assert(hdev->vhost_ops);
1512 if (mask) {
1513 assert(vdev->use_guest_notifier_mask);
1514 file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1515 } else {
1516 file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1519 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1520 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1521 if (r < 0) {
1522 VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1526 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1527 uint64_t features)
1529 const int *bit = feature_bits;
1530 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1531 uint64_t bit_mask = (1ULL << *bit);
1532 if (!(hdev->features & bit_mask)) {
1533 features &= ~bit_mask;
1535 bit++;
1537 return features;
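/*
 * Example of the masking above (hypothetical bit list): with
 * feature_bits = { VIRTIO_NET_F_MRG_RXBUF, VHOST_INVALID_FEATURE_BIT },
 * any bit in that list which the backend did not report in hdev->features
 * is cleared from the proposed feature set; bits not listed are passed
 * through untouched.
 */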
1540 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1541 uint64_t features)
1543 const int *bit = feature_bits;
1544 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1545 uint64_t bit_mask = (1ULL << *bit);
1546 if (features & bit_mask) {
1547 hdev->acked_features |= bit_mask;
1549 bit++;
1553 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1554 uint32_t config_len)
1556 assert(hdev->vhost_ops);
1558 if (hdev->vhost_ops->vhost_get_config) {
1559 return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
1562 return -1;
1565 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1566 uint32_t offset, uint32_t size, uint32_t flags)
1568 assert(hdev->vhost_ops);
1570 if (hdev->vhost_ops->vhost_set_config) {
1571 return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1572 size, flags);
1575 return -1;
1578 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1579 const VhostDevConfigOps *ops)
1581 hdev->config_ops = ops;
1584 void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1586 if (inflight && inflight->addr) {
1587 qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1588 inflight->addr = NULL;
1589 inflight->fd = -1;
1593 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1594 uint64_t new_size)
1596 Error *err = NULL;
1597 int fd = -1;
1598 void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1599 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1600 &fd, &err);
1602 if (err) {
1603 error_report_err(err);
1604 return -1;
1607 vhost_dev_free_inflight(inflight);
1608 inflight->offset = 0;
1609 inflight->addr = addr;
1610 inflight->fd = fd;
1611 inflight->size = new_size;
1613 return 0;
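/*
 * Migration stream format used by the save/load helpers below:
 *
 *     be64  size          (0 if there is no inflight buffer)
 *     be16  queue_size    (only present when size != 0)
 *     bytes addr[size]    (raw inflight buffer contents)
 *
 * On load, a mismatching size reallocates the memfd-backed buffer via
 * vhost_dev_resize_inflight() before the contents are read back.
 */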
1616 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1618 if (inflight->addr) {
1619 qemu_put_be64(f, inflight->size);
1620 qemu_put_be16(f, inflight->queue_size);
1621 qemu_put_buffer(f, inflight->addr, inflight->size);
1622 } else {
1623 qemu_put_be64(f, 0);
1627 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1629 uint64_t size;
1631 size = qemu_get_be64(f);
1632 if (!size) {
1633 return 0;
1636 if (inflight->size != size) {
1637 if (vhost_dev_resize_inflight(inflight, size)) {
1638 return -1;
1641 inflight->queue_size = qemu_get_be16(f);
1643 qemu_get_buffer(f, inflight->addr, size);
1645 return 0;
1648 int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
1650 int r;
1652 if (hdev->vhost_ops->vhost_get_inflight_fd == NULL ||
1653 hdev->vhost_ops->vhost_set_inflight_fd == NULL) {
1654 return 0;
1657 hdev->vdev = vdev;
1659 r = vhost_dev_set_features(hdev, hdev->log_enabled);
1660 if (r < 0) {
1661 VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed");
1662 return r;
1665 return 0;
1668 int vhost_dev_set_inflight(struct vhost_dev *dev,
1669 struct vhost_inflight *inflight)
1671 int r;
1673 if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1674 r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1675 if (r) {
1676 VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
1677 return -errno;
1681 return 0;
1684 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1685 struct vhost_inflight *inflight)
1687 int r;
1689 if (dev->vhost_ops->vhost_get_inflight_fd) {
1690 r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1691 if (r) {
1692 VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
1693 return -errno;
1697 return 0;
1700 /* Host notifiers must be enabled at this point. */
1701 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1703 int i, r;
1705 /* should only be called after backend is connected */
1706 assert(hdev->vhost_ops);
1708 hdev->started = true;
1709 hdev->vdev = vdev;
1711 r = vhost_dev_set_features(hdev, hdev->log_enabled);
1712 if (r < 0) {
1713 goto fail_features;
1716 if (vhost_dev_has_iommu(hdev)) {
1717 memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1720 r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1721 if (r < 0) {
1722 VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1723 r = -errno;
1724 goto fail_mem;
1726 for (i = 0; i < hdev->nvqs; ++i) {
1727 r = vhost_virtqueue_start(hdev,
1728 vdev,
1729 hdev->vqs + i,
1730 hdev->vq_index + i);
1731 if (r < 0) {
1732 goto fail_vq;
1736 if (hdev->log_enabled) {
1737 uint64_t log_base;
1739 hdev->log_size = vhost_get_log_size(hdev);
1740 hdev->log = vhost_log_get(hdev->log_size,
1741 vhost_dev_log_is_shared(hdev));
1742 log_base = (uintptr_t)hdev->log->log;
1743 r = hdev->vhost_ops->vhost_set_log_base(hdev,
1744 hdev->log_size ? log_base : 0,
1745 hdev->log);
1746 if (r < 0) {
1747 VHOST_OPS_DEBUG("vhost_set_log_base failed");
1748 r = -errno;
1749 goto fail_log;
1752 if (hdev->vhost_ops->vhost_dev_start) {
1753 r = hdev->vhost_ops->vhost_dev_start(hdev, true);
1754 if (r) {
1755 goto fail_log;
1758 if (vhost_dev_has_iommu(hdev) &&
1759 hdev->vhost_ops->vhost_set_iotlb_callback) {
1760 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1762         /* Update used ring information for the IOTLB to work correctly;
1763          * the vhost-kernel code requires this. */
1764 for (i = 0; i < hdev->nvqs; ++i) {
1765 struct vhost_virtqueue *vq = hdev->vqs + i;
1766 vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1769 return 0;
1770 fail_log:
1771 vhost_log_put(hdev, false);
1772 fail_vq:
1773 while (--i >= 0) {
1774 vhost_virtqueue_stop(hdev,
1775 vdev,
1776 hdev->vqs + i,
1777 hdev->vq_index + i);
1780 fail_mem:
1781 fail_features:
1783 hdev->started = false;
1784 return r;
1787 /* Host notifiers must be enabled at this point. */
1788 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1790 int i;
1792 /* should only be called after backend is connected */
1793 assert(hdev->vhost_ops);
1795 if (hdev->vhost_ops->vhost_dev_start) {
1796 hdev->vhost_ops->vhost_dev_start(hdev, false);
1798 for (i = 0; i < hdev->nvqs; ++i) {
1799 vhost_virtqueue_stop(hdev,
1800 vdev,
1801 hdev->vqs + i,
1802 hdev->vq_index + i);
1805 if (vhost_dev_has_iommu(hdev)) {
1806 if (hdev->vhost_ops->vhost_set_iotlb_callback) {
1807 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1809 memory_listener_unregister(&hdev->iommu_listener);
1811 vhost_log_put(hdev, true);
1812 hdev->started = false;
1813 hdev->vdev = NULL;
1816 int vhost_net_set_backend(struct vhost_dev *hdev,
1817 struct vhost_vring_file *file)
1819 if (hdev->vhost_ops->vhost_net_set_backend) {
1820 return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1823 return -1;