/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
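
/*
 * Worked example of the chunk arithmetic above, assuming the usual
 * definitions in hw/virtio/vhost.h (vhost_log_chunk_t is a 64-bit word,
 * VHOST_LOG_PAGE is 0x1000, so VHOST_LOG_CHUNK covers 64 * 4KiB = 256KiB
 * of guest memory): a dirty page at guest physical address 0x42000 lands
 * in chunk 0x42000 / VHOST_LOG_CHUNK = 1, bit 2 of that chunk.  The xchg
 * claims all 64 page bits of a chunk at once and then walks the set bits.
 */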

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region plus change size */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
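
/*
 * Illustration of the four cases handled above for an existing region
 * covering GPA [0x1000, 0x8fff] (a sketch, not tied to any caller):
 *  - unassign [0x0000, 0xffff]: region fully covered, removed outright;
 *  - unassign [0x4000, 0xffff]: tail overlap, region shrinks to
 *    [0x1000, 0x3fff];
 *  - unassign [0x0000, 0x4fff]: head overlap, region shifts to
 *    [0x5000, 0x8fff] and userspace_addr moves by the same amount;
 *  - unassign [0x3000, 0x5fff]: hole punched, region splits into
 *    [0x1000, 0x2fff] and [0x6000, 0x8fff].
 */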

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;

    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
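
/*
 * Merging only triggers when two regions are contiguous in both guest
 * physical and userspace address space.  For example (a sketch): existing
 * GPA [0x0, 0xfff] at HVA 0x7f0000000000 and new GPA [0x1000, 0x1fff] at
 * HVA 0x7f0000001000 collapse into one 8KiB entry; had the HVAs not been
 * contiguous as well, both entries would be kept.
 */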

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
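
/*
 * Sizing example for the rule above (again assuming 256KiB chunks of
 * 64 bits each): a guest whose highest RAM region ends at GPA 4GiB - 1
 * needs last / VHOST_LOG_CHUNK + 1 = 16384 chunks, i.e. a 128KiB bitmap
 * holding one dirty bit per 4KiB page.
 */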

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}
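
/*
 * vhost_log/vhost_log_shm act as single-entry caches: every device that
 * asks for a log of the same size and kind shares one buffer via refcnt;
 * a request for a different size allocates a fresh log and the old one
 * drains away through vhost_log_put().
 */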

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}
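
/*
 * The ordering in vhost_dev_log_resize() matters: the backend is pointed
 * at the new log before the old one is released and synced, so a page
 * dirtied during the switch is recorded in one log or the other and is
 * never lost.
 */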

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            error_report("Unable to map ring buffer for ring %d", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            error_report("Ring buffer relocated for ring %d", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;

    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;

    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;

    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;

    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}
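
/*
 * Concretely: a virtio 1.0 device is always little-endian, so no fixup is
 * needed; a legacy device uses guest-native endianness, so the fixup is
 * only needed when guest and host endianness disagree.
 */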

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}
*dev
,
821 struct VirtIODevice
*vdev
,
822 struct vhost_virtqueue
*vq
,
827 int vhost_vq_index
= dev
->vhost_ops
->vhost_get_vq_index(dev
, idx
);
828 struct vhost_vring_file file
= {
829 .index
= vhost_vq_index
831 struct vhost_vring_state state
= {
832 .index
= vhost_vq_index
834 struct VirtQueue
*vvq
= virtio_get_queue(vdev
, idx
);
837 vq
->num
= state
.num
= virtio_queue_get_num(vdev
, idx
);
838 r
= dev
->vhost_ops
->vhost_set_vring_num(dev
, &state
);
840 VHOST_OPS_DEBUG("vhost_set_vring_num failed");
844 state
.num
= virtio_queue_get_last_avail_idx(vdev
, idx
);
845 r
= dev
->vhost_ops
->vhost_set_vring_base(dev
, &state
);
847 VHOST_OPS_DEBUG("vhost_set_vring_base failed");
851 if (vhost_needs_vring_endian(vdev
)) {
852 r
= vhost_virtqueue_set_vring_endian_legacy(dev
,
853 virtio_is_big_endian(vdev
),
860 s
= l
= virtio_queue_get_desc_size(vdev
, idx
);
861 a
= virtio_queue_get_desc_addr(vdev
, idx
);
862 vq
->desc
= cpu_physical_memory_map(a
, &l
, 0);
863 if (!vq
->desc
|| l
!= s
) {
865 goto fail_alloc_desc
;
867 s
= l
= virtio_queue_get_avail_size(vdev
, idx
);
868 a
= virtio_queue_get_avail_addr(vdev
, idx
);
869 vq
->avail
= cpu_physical_memory_map(a
, &l
, 0);
870 if (!vq
->avail
|| l
!= s
) {
872 goto fail_alloc_avail
;
874 vq
->used_size
= s
= l
= virtio_queue_get_used_size(vdev
, idx
);
875 vq
->used_phys
= a
= virtio_queue_get_used_addr(vdev
, idx
);
876 vq
->used
= cpu_physical_memory_map(a
, &l
, 1);
877 if (!vq
->used
|| l
!= s
) {
879 goto fail_alloc_used
;
882 vq
->ring_size
= s
= l
= virtio_queue_get_ring_size(vdev
, idx
);
883 vq
->ring_phys
= a
= virtio_queue_get_ring_addr(vdev
, idx
);
884 vq
->ring
= cpu_physical_memory_map(a
, &l
, 1);
885 if (!vq
->ring
|| l
!= s
) {
887 goto fail_alloc_ring
;
890 r
= vhost_virtqueue_set_addr(dev
, vq
, vhost_vq_index
, dev
->log_enabled
);
896 file
.fd
= event_notifier_get_fd(virtio_queue_get_host_notifier(vvq
));
897 r
= dev
->vhost_ops
->vhost_set_vring_kick(dev
, &file
);
899 VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
904 /* Clear and discard previous events if any. */
905 event_notifier_test_and_clear(&vq
->masked_notifier
);
907 /* Init vring in unmasked state, unless guest_notifier_mask
910 if (!vdev
->use_guest_notifier_mask
) {
911 /* TODO: check and handle errors. */
912 vhost_virtqueue_mask(dev
, vdev
, idx
, false);
919 cpu_physical_memory_unmap(vq
->ring
, virtio_queue_get_ring_size(vdev
, idx
),
922 cpu_physical_memory_unmap(vq
->used
, virtio_queue_get_used_size(vdev
, idx
),
925 cpu_physical_memory_unmap(vq
->avail
, virtio_queue_get_avail_size(vdev
, idx
),
928 cpu_physical_memory_unmap(vq
->desc
, virtio_queue_get_desc_size(vdev
, idx
),
934 static void vhost_virtqueue_stop(struct vhost_dev
*dev
,
935 struct VirtIODevice
*vdev
,
936 struct vhost_virtqueue
*vq
,
939 int vhost_vq_index
= dev
->vhost_ops
->vhost_get_vq_index(dev
, idx
);
940 struct vhost_vring_state state
= {
941 .index
= vhost_vq_index
,
945 r
= dev
->vhost_ops
->vhost_get_vring_base(dev
, &state
);
947 VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx
, r
);
949 virtio_queue_set_last_avail_idx(vdev
, idx
, state
.num
);
951 virtio_queue_invalidate_signalled_used(vdev
, idx
);
953 /* In the cross-endian case, we need to reset the vring endianness to
954 * native as legacy devices expect so by default.
956 if (vhost_needs_vring_endian(vdev
)) {
957 vhost_virtqueue_set_vring_endian_legacy(dev
,
958 !virtio_is_big_endian(vdev
),
962 cpu_physical_memory_unmap(vq
->ring
, virtio_queue_get_ring_size(vdev
, idx
),
963 0, virtio_queue_get_ring_size(vdev
, idx
));
964 cpu_physical_memory_unmap(vq
->used
, virtio_queue_get_used_size(vdev
, idx
),
965 1, virtio_queue_get_used_size(vdev
, idx
));
966 cpu_physical_memory_unmap(vq
->avail
, virtio_queue_get_avail_size(vdev
, idx
),
967 0, virtio_queue_get_avail_size(vdev
, idx
));
968 cpu_physical_memory_unmap(vq
->desc
, virtio_queue_get_desc_size(vdev
, idx
),
969 0, virtio_queue_get_desc_size(vdev
, idx
));

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}
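
/*
 * The busyloop timeout is a latency optimization: it asks the backend to
 * keep polling the ring for a while before falling back to eventfd
 * wakeups.  Backends that do not implement the op reject the request
 * with -EINVAL, as above.
 */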

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;

    if (!k->ioeventfd_started) {
        error_report("binding does not support host notifiers");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;

    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;

    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit += 1;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;

    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit += 1;
    }
}
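
/*
 * Typical usage from a device backend (a sketch; the real feature-bit
 * tables live with the backends, e.g. vhost-net's in hw/net/vhost_net.c):
 *
 *     static const int example_feature_bits[] = {   // hypothetical table
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *     features = vhost_get_features(hdev, example_feature_bits, features);
 *     ...guest negotiation...
 *     vhost_ack_features(hdev, example_feature_bits, guest_features);
 */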

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -ENOSYS;
}