/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"
/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}
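
/*
 * Illustrative note (not in the original source): with two vhost devices
 * whose backends report memslot limits of, say, 64 and 32, slots_limit
 * above becomes MIN(~0U, 64, 32) == 32, so adding another memory slot is
 * only reported as possible while used_memslots stays below 32.
 */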
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;

            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
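
/*
 * Worked example (illustrative; values assume the definitions in
 * hw/virtio/vhost.h, where VHOST_LOG_PAGE is 0x1000 and a chunk is one
 * 64-bit word, so VHOST_LOG_CHUNK == 0x40000, i.e. 64 pages per chunk):
 * a dirty page at guest physical address 0x51000 lives in chunk
 * 0x51000 / 0x40000 == 1, at bit (0x51000 % 0x40000) / 0x1000 == 17.
 * The loop above exchanges the whole chunk to zero and replays each set
 * bit into memory_region_set_dirty(), one VHOST_LOG_PAGE at a time.
 */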
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];

        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
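
/*
 * Illustrative summary of the cases above (not in the original source):
 * unassigning [0x4000, 0x8000) from an existing region [0x0, 0x10000) is
 * the "split" case: the region shrinks to [0x0, 0x4000) and a new region
 * [0x8000, 0x10000) is appended, with userspace_addr shifted by the same
 * 0x8000 so the GPA-to-HVA mapping is preserved. Full containment removes
 * the region outright; overlap at one end only shrinks or shifts it.
 */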
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);

        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
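
/*
 * Illustrative example (not in the original source): assigning
 * [0x10000, +0x10000) at uaddr 0x7f0000010000 next to an existing region
 * [0x0, +0x10000) at uaddr 0x7f0000000000 is adjacent in both the
 * guest-physical and the userspace address spaces, so the two merge into
 * one region [0x0, +0x20000); fewer, larger slots keep the table under
 * the backend's memslot limit.
 */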
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
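
/*
 * Worked example (illustrative; assumes VHOST_LOG_CHUNK == 0x40000 as in
 * hw/virtio/vhost.h): for a single 4GiB region ending at 0xFFFFFFFF,
 * log_size is 0xFFFFFFFF / 0x40000 + 1 == 0x4000 chunks; at 8 bytes per
 * chunk that is a 128KiB dirty-log bitmap.
 */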
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}
static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}
static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}
*dev
,
464 if (!ranges_overlap(start_addr
, size
, part_addr
, part_size
)) {
468 p
= vhost_memory_map(dev
, part_addr
, &l
, 1);
469 if (!p
|| l
!= part_size
) {
475 vhost_memory_unmap(dev
, p
, l
, 0, 0);
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    if (add) {
        vhost_dev_unassign_memory(dev, start_addr, size);
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);
}
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}
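
/*
 * Note on the ordering above (restating the inline comments): the log may
 * only grow before, and shrink after, vhost_set_mem_table(), so that at
 * every instant the backend's bitmap covers every address it might log.
 * VHOST_LOG_BUFFER adds slack so that back-to-back layout changes do not
 * reallocate the log each time.
 */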
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    trace_vhost_region_add(dev, section->mr->name ?: NULL);
    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}
static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    trace_vhost_region_del(dev, section->mr->name ?: NULL);
    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device iotlb");
    }
}
static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    trace_vhost_iommu_region_add(dev, section->mr->name ?: NULL);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}
static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    trace_vhost_iommu_region_del(dev, section->mr->name ?: NULL);

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}
static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;

    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;

    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;

    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}
static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}
/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}
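
/*
 * Illustrative example (not in the original source): a legacy device
 * (no VIRTIO_F_VERSION_1) serving a big-endian guest on a little-endian
 * host returns true here, so the ring endianness must be fixed up
 * explicitly via vhost_virtqueue_set_vring_endian_legacy() below; modern
 * devices are always little-endian and need no fixup.
 */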
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}
static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}
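
/*
 * Worked example (illustrative): with a region {guest_phys_addr =
 * 0x40000000, memory_size = 0x10000000, userspace_addr =
 * 0x7f0000000000}, looking up gpa 0x48000000 yields uaddr
 * 0x7f0008000000 and len 0x8000000 (the bytes left to the end of the
 * region); the caller below then clamps len to the IOTLB entry size.
 */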
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Failed to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();
    return ret;
}
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}
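
/*
 * Summary of the start sequence above (restating the code): set the ring
 * size and base index, fix up endianness if needed, map the
 * descriptor/avail/used rings, program the ring addresses, then wire the
 * kick (host notifier) and call (guest notifier) eventfds. The error
 * path unmaps in reverse order through the fail_* labels.
 */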
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}
static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}
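
/*
 * Usage sketch (illustrative, not from this file): a backend wrapper such
 * as vhost_net calls vhost_dev_init() once at setup with its opaque
 * backend handle, then vhost_dev_enable_notifiers() and vhost_dev_start()
 * when the device goes live, mirroring the teardown order
 * vhost_dev_stop() / vhost_dev_disable_notifiers() / vhost_dev_cleanup().
 */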
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
        }
        assert(e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;

    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}
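
/*
 * Note (illustrative, restating the code above): masking does not drop
 * interrupts, it reroutes the backend's call eventfd to
 * vq->masked_notifier, where events are latched; vhost_virtqueue_pending()
 * test-and-clears that notifier so a pending interrupt can still be
 * delivered on unmask.
 */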
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;

    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;

    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}
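
/*
 * Worked example (illustrative, hypothetical bit list): with
 * feature_bits = { VIRTIO_F_VERSION_1, VHOST_INVALID_FEATURE_BIT } and a
 * backend that lacks VIRTIO_F_VERSION_1, vhost_get_features() clears that
 * bit from the device's offer; once the guest acks it,
 * vhost_ack_features() records it in hdev->acked_features, which
 * vhost_dev_set_features() later pushes to the backend.
 */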
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}
int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}
void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    assert(hdev->vhost_ops);
    hdev->config_ops = ops;
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:
    hdev->started = false;
    return r;
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}
int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}