/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

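/* The two logs above are cached globally and shared across devices:
 * vhost_log is heap-allocated, while vhost_log_shm is memfd-backed for
 * backends that must map the log themselves (see vhost_log_alloc() and
 * vhost_dev_log_is_shared() below). */
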
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

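/* Dirty log layout: the log is an array of vhost_log_chunk_t entries, each
 * covering VHOST_LOG_CHUNK bytes of guest memory with one bit per
 * VHOST_LOG_PAGE page. vhost_dev_sync_region() walks the chunks that
 * intersect a memory section and feeds every set bit back to the memory
 * API as a dirty page. */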
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

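/* A note on the region helpers that follow: unassigning a range from an
 * existing region has four cases, handled in order below. The range covers
 * the whole region (drop it), covers its tail (shrink it), covers its head
 * (shift the start), or sits strictly inside it (split it in two). */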
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

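/* Merging in vhost_dev_assign_memory() below is only legal when the new
 * range is adjacent to (or overlaps) an existing region in both guest
 * physical and userspace address space, so that a single (guest_phys_addr,
 * userspace_addr, memory_size) triple still describes the result. */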
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

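/* The log size computed above is in units of vhost_log_chunk_t, not bytes:
 * it is the number of chunks needed to cover the highest tracked guest
 * physical address. vhost_log_alloc() below converts it to bytes with
 * sizeof(*log->log). */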
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

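/* Drop a reference to the device's log. On the last put, the final dirty
 * state is flushed back to the memory API (when sync is set) before the
 * buffer is released. */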
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

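/* Fold one memory API section into the flat region table. The no-change
 * checks below matter: at least cirrus_vga re-adds identical sections very
 * frequently, and skipping them avoids needlessly re-sending the memory
 * table to the backend at commit time. */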
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

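/* At commit time the accumulated changes are pushed to the backend in one
 * go. The log resize ordering below is deliberate: grow the log before the
 * table update and shrink it only afterwards, so the backend can never log
 * a write beyond the bitmap it was last given. */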
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

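/* The addresses handed to the backend below are QEMU userspace virtual
 * addresses of the mapped rings; log_guest_addr is the exception and stays
 * a guest physical address, so used-ring writes can be logged by GPA. */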
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

*listener
)
739 r
= vhost_migration_log(listener
, false);
745 static void vhost_log_start(MemoryListener
*listener
,
746 MemoryRegionSection
*section
,
749 /* FIXME: implement */
static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef TARGET_IS_BIENDIAN
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
#else
    return false;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

*dev
,
802 struct VirtIODevice
*vdev
,
803 struct vhost_virtqueue
*vq
,
808 int vhost_vq_index
= dev
->vhost_ops
->vhost_get_vq_index(dev
, idx
);
809 struct vhost_vring_file file
= {
810 .index
= vhost_vq_index
812 struct vhost_vring_state state
= {
813 .index
= vhost_vq_index
815 struct VirtQueue
*vvq
= virtio_get_queue(vdev
, idx
);
818 vq
->num
= state
.num
= virtio_queue_get_num(vdev
, idx
);
819 r
= dev
->vhost_ops
->vhost_set_vring_num(dev
, &state
);
824 state
.num
= virtio_queue_get_last_avail_idx(vdev
, idx
);
825 r
= dev
->vhost_ops
->vhost_set_vring_base(dev
, &state
);
830 if (vhost_needs_vring_endian(vdev
)) {
831 r
= vhost_virtqueue_set_vring_endian_legacy(dev
,
832 virtio_is_big_endian(vdev
),
839 s
= l
= virtio_queue_get_desc_size(vdev
, idx
);
840 a
= virtio_queue_get_desc_addr(vdev
, idx
);
841 vq
->desc
= cpu_physical_memory_map(a
, &l
, 0);
842 if (!vq
->desc
|| l
!= s
) {
844 goto fail_alloc_desc
;
846 s
= l
= virtio_queue_get_avail_size(vdev
, idx
);
847 a
= virtio_queue_get_avail_addr(vdev
, idx
);
848 vq
->avail
= cpu_physical_memory_map(a
, &l
, 0);
849 if (!vq
->avail
|| l
!= s
) {
851 goto fail_alloc_avail
;
853 vq
->used_size
= s
= l
= virtio_queue_get_used_size(vdev
, idx
);
854 vq
->used_phys
= a
= virtio_queue_get_used_addr(vdev
, idx
);
855 vq
->used
= cpu_physical_memory_map(a
, &l
, 1);
856 if (!vq
->used
|| l
!= s
) {
858 goto fail_alloc_used
;
861 vq
->ring_size
= s
= l
= virtio_queue_get_ring_size(vdev
, idx
);
862 vq
->ring_phys
= a
= virtio_queue_get_ring_addr(vdev
, idx
);
863 vq
->ring
= cpu_physical_memory_map(a
, &l
, 1);
864 if (!vq
->ring
|| l
!= s
) {
866 goto fail_alloc_ring
;
869 r
= vhost_virtqueue_set_addr(dev
, vq
, vhost_vq_index
, dev
->log_enabled
);
875 file
.fd
= event_notifier_get_fd(virtio_queue_get_host_notifier(vvq
));
876 r
= dev
->vhost_ops
->vhost_set_vring_kick(dev
, &file
);
882 /* Clear and discard previous events if any. */
883 event_notifier_test_and_clear(&vq
->masked_notifier
);
885 /* Init vring in unmasked state, unless guest_notifier_mask
888 if (!vdev
->use_guest_notifier_mask
) {
889 /* TODO: check and handle errors. */
890 vhost_virtqueue_mask(dev
, vdev
, idx
, false);
897 cpu_physical_memory_unmap(vq
->ring
, virtio_queue_get_ring_size(vdev
, idx
),
900 cpu_physical_memory_unmap(vq
->used
, virtio_queue_get_used_size(vdev
, idx
),
903 cpu_physical_memory_unmap(vq
->avail
, virtio_queue_get_avail_size(vdev
, idx
),
906 cpu_physical_memory_unmap(vq
->desc
, virtio_queue_get_desc_size(vdev
, idx
),
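/* Tearing down a virtqueue mirrors the setup above: read the last avail
 * index back from the backend so virtio in QEMU can resume where vhost
 * stopped, restore native endianness for cross-endian legacy setups, then
 * unmap the rings. */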
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        close((uintptr_t)opaque);
        return -1;
    }
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

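/* Typical lifecycle for a vhost-backed device, as exercised by callers
 * such as vhost-net (illustrative sketch only, error handling omitted):
 *
 *     vhost_dev_init(&dev, opaque, backend_type);
 *     vhost_ack_features(&dev, feature_bits, guest_features);
 *     vhost_dev_enable_notifiers(&dev, vdev);
 *     vhost_dev_start(&dev, vdev);
 *     ...
 *     vhost_dev_stop(&dev, vdev);
 *     vhost_dev_disable_notifiers(&dev, vdev);
 *     vhost_dev_cleanup(&dev);
 *
 * Host notifiers must be enabled before vhost_dev_start() and stay enabled
 * until vhost_dev_stop() has returned. */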
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}