/*
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/virtio/vhost.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

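/*
 * Illustrative note (not part of the original file): vhost_has_free_slot()
 * lets callers that are about to add a memory region (e.g. memory hotplug)
 * check whether every registered vhost backend can accept one more memslot.
 * A minimal sketch of such a caller, with the function name invented for
 * illustration:
 *
 *     static int hypothetical_pre_plug_check(Error **errp)
 *     {
 *         if (!vhost_has_free_slot()) {
 *             error_setg(errp, "a used vhost backend has no free memory slots");
 *             return -ENOSPC;
 *         }
 *         return 0;
 *     }
 */
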
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

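/*
 * Illustrative note (not part of the original file): the log is an array of
 * vhost_log_chunk_t words; each word covers VHOST_LOG_CHUNK bytes of guest
 * physical address space, one bit per VHOST_LOG_PAGE-sized page. A sketch of
 * the address arithmetic the loop above relies on, for a hypothetical gpa:
 *
 *     uint64_t chunk = gpa / VHOST_LOG_CHUNK;  // word index in dev->log->log
 *     int bit = gpa % VHOST_LOG_CHUNK / VHOST_LOG_PAGE; // bit within the word
 *     // Reconstructing the page address, as the loop does incrementally:
 *     assert(chunk * VHOST_LOG_CHUNK + bit * VHOST_LOG_PAGE
 *            == gpa / VHOST_LOG_PAGE * VHOST_LOG_PAGE);
 */
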
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

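/*
 * Illustrative note (not part of the original file): unassign distinguishes
 * four cases for an existing region R and the removed range M:
 *
 *     M covers R entirely      -> drop R             ("remove whole region")
 *     M covers the tail of R   -> truncate R         ("shrink region")
 *     M covers the head of R   -> advance R's start  ("shift region")
 *     M lies strictly inside R -> split R in two     ("split region")
 *
 * The overlap_start/overlap_end/overlap_middle/split counters exist only so
 * the asserts can check that these cases stay mutually exclusive within a
 * single call.
 */
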
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

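/*
 * Illustrative note (not part of the original file): a new range merges with
 * an existing region only when the two are adjacent in BOTH address spaces:
 * in guest physical space and, at the same relative offset, in userspace.
 * For example (addresses invented):
 *
 *     existing: gpa 0x1000..0x1fff at uaddr 0x7f0000001000
 *     new:      gpa 0x2000..0x2fff at uaddr 0x7f0000002000
 *
 * merges into one slot covering gpa 0x1000..0x2fff, whereas the same gpa
 * range backed by an unrelated uaddr stays a separate slot.
 */
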
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

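/*
 * Illustrative note (not part of the original file): the log must reach the
 * highest guest physical address that can be dirtied, whether it lies in a
 * memory region or in a vring's used ring. Assuming VHOST_LOG_PAGE is 0x1000
 * and vhost_log_chunk_t is a 64-bit word (so VHOST_LOG_CHUNK is 0x40000), a
 * region ending at gpa 0xffffffff needs 0xffffffff / 0x40000 + 1 = 0x4000
 * chunks, i.e. a 128KiB log.
 */
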
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

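/*
 * Illustrative note (not part of the original file): vhost_log and
 * vhost_log_shm act as one-entry caches so that all vhost devices of a given
 * kind share a single dirty log; refcnt tracks that sharing, and the log is
 * only freed in vhost_log_put() below when the last user drops it.
 */
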
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }
        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }
        g_free(log);
    }
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend about the log switch; this must happen before
     * releasing the current log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    if (add) {
        vhost_dev_unassign_memory(dev, start_addr, size);
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

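/*
 * Illustrative note (not part of the original file): the ordering above is
 * what keeps logging safe across a table update. Growing the log before the
 * update means the backend never writes past the buffer it was told about;
 * shrinking only after the update means every address the old table could
 * dirty stays covered until the backend has switched over. VHOST_LOG_BUFFER
 * merely adds slack so the log is not reallocated on every small change.
 */
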
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

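/*
 * Illustrative note (not part of the original file): this path is only taken
 * for legacy (pre-VIRTIO_F_VERSION_1) devices whose guest endianness differs
 * from the host's; modern virtio is always little-endian, so no vring
 * endianness fixup is ever needed there.
 */
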
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        return -errno;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        virtio_legacy_is_cross_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }
    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

*dev
,
877 struct VirtIODevice
*vdev
,
878 struct vhost_virtqueue
*vq
,
881 int vhost_vq_index
= dev
->vhost_ops
->vhost_get_vq_index(dev
, idx
);
882 struct vhost_vring_state state
= {
883 .index
= vhost_vq_index
,
887 r
= dev
->vhost_ops
->vhost_get_vring_base(dev
, &state
);
889 fprintf(stderr
, "vhost VQ %d ring restore failed: %d\n", idx
, r
);
892 virtio_queue_set_last_avail_idx(vdev
, idx
, state
.num
);
893 virtio_queue_invalidate_signalled_used(vdev
, idx
);
895 /* In the cross-endian case, we need to reset the vring endianness to
896 * native as legacy devices expect so by default.
898 if (!virtio_vdev_has_feature(vdev
, VIRTIO_F_VERSION_1
) &&
899 virtio_legacy_is_cross_endian(vdev
)) {
900 r
= vhost_virtqueue_set_vring_endian_legacy(dev
,
901 !virtio_is_big_endian(vdev
),
904 error_report("failed to reset vring endianness");
909 cpu_physical_memory_unmap(vq
->ring
, virtio_queue_get_ring_size(vdev
, idx
),
910 0, virtio_queue_get_ring_size(vdev
, idx
));
911 cpu_physical_memory_unmap(vq
->used
, virtio_queue_get_used_size(vdev
, idx
),
912 1, virtio_queue_get_used_size(vdev
, idx
));
913 cpu_physical_memory_unmap(vq
->avail
, virtio_queue_get_avail_size(vdev
, idx
),
914 0, virtio_queue_get_avail_size(vdev
, idx
));
915 cpu_physical_memory_unmap(vq
->desc
, virtio_queue_get_desc_size(vdev
, idx
),
916 0, virtio_queue_get_desc_size(vdev
, idx
));
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

*dev
,
932 struct vhost_virtqueue
*vq
, int n
)
934 int vhost_vq_index
= dev
->vhost_ops
->vhost_get_vq_index(dev
, n
);
935 struct vhost_vring_file file
= {
936 .index
= vhost_vq_index
,
938 int r
= event_notifier_init(&vq
->masked_notifier
, 0);
943 file
.fd
= event_notifier_get_fd(&vq
->masked_notifier
);
944 r
= dev
->vhost_ops
->vhost_set_vring_call(dev
, &file
);
951 event_notifier_cleanup(&vq
->masked_notifier
);
static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        close((uintptr_t)opaque);
        return -1;
    }
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;

    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

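/*
 * Illustrative note (not part of the original file): feature_bits is a
 * VHOST_INVALID_FEATURE_BIT-terminated list of the bits a given backend
 * cares about. A sketch in the style of the vhost_net/vhost_scsi callers,
 * with an invented list:
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_F_NOTIFY_ON_EMPTY,
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     host_features = vhost_get_features(hdev, example_feature_bits,
 *                                        host_features);
 *     ...after guest negotiation...
 *     vhost_ack_features(hdev, example_feature_bits, guest_features);
 */
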
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:
    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}

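/*
 * Illustrative note (not part of the original file): the expected lifecycle
 * of these entry points, sketched for a hypothetical in-kernel backend with
 * error handling omitted:
 *
 *     vhost_dev_init(hdev, (void *)(uintptr_t)vhostfd,
 *                    VHOST_BACKEND_TYPE_KERNEL);
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev);
 *     ...guest runs; vhost_virtqueue_mask()/vhost_virtqueue_pending()
 *        as interrupts are masked and unmasked...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */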