/*
 * Copyright Red Hat, Inc. 2010
 *
 * Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"
#include "sysemu/dma.h"
/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);
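/*
 * Report whether every registered vhost device still has a free backend
 * memory slot: the effective limit is the minimum of the per-backend
 * memslot limits, compared against the number of slots currently in use.
 */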
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}
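/*
 * Sync the dirty log for the part of @section that falls inside the
 * [rfirst, rlast] range: each set bit in the log marks one
 * VHOST_LOG_PAGE-sized page that must be flagged dirty in QEMU's
 * memory API.
 */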
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
    vhost_log_chunk_t *log = dev->log->log;
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
            addr += VHOST_LOG_CHUNK;
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
            hwaddr section_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        addr += VHOST_LOG_CHUNK;
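/*
 * Walk every vhost memory region and every virtqueue's used ring and
 * sync the dirty bits that fall inside the given section; called from
 * the memory listener's log_sync hook.
 */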
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
    if (!dev->log_enabled || !dev->started) {
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        /* clone old region */
        memcpy(reg, dev->mem->regions + from, sizeof *reg);
        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
    struct vhost_memory_region *merged = NULL;

    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        /* clone old region */
        memcpy(reg, dev->mem->regions + from, sizeof *reg);
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);

        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;

    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
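/*
 * The dirty log must cover the highest guest physical address of any
 * memory region or used ring, measured in log chunks.  For example,
 * assuming the usual definitions in vhost.h (VHOST_LOG_PAGE of 4 KiB and
 * 64 pages tracked per 64-bit chunk, i.e. a 256 KiB span per chunk), a
 * region ending at 1 GiB needs 0x40000000 / 0x40000 = 4096 chunks.
 */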
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
    uint64_t log_size = 0;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
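/*
 * Allocate the dirty log itself.  When the backend requires a log it can
 * share (the shm case), the buffer is backed by a sealed memfd; otherwise
 * a plain zeroed allocation is enough.
 */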
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));

    log = g_new0(struct vhost_log, 1);
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
        memset(log->log, 0, logsize);
        log->log = g_malloc0(logsize);
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);

static void vhost_log_put(struct vhost_dev *dev, bool sync)
    struct vhost_log *log = dev->log;

    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        if (vhost_log == log) {
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
            vhost_log_shm = NULL;

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
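/*
 * Switch the device to a log of the given size: fetch (or allocate) the
 * new log, point the backend at it, and only then drop the reference to
 * the old one so no write goes unlogged during the switch.
 */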
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;

    /* Inform the backend of the log switch; this must be done before
       releasing the current log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    vhost_log_put(dev, true);
    dev->log_size = size;

static int vhost_dev_has_iommu(struct vhost_dev *dev)
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    return (void *)(uintptr_t)addr;

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
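/*
 * After a memory layout change, check that each ring part (descriptor,
 * avail and used ring) that overlaps the changed range can still be
 * mapped at its original address; a relocation is reported as -EBUSY.
 */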
static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
    p = vhost_memory_map(dev, part_addr, &l, 1);
    if (!p || l != part_size) {
    vhost_memory_unmap(dev, p, l, 0, 0);

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
    const char *part_name[] = {
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
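/*
 * Apply one memory listener update to the vhost memory table: drop or
 * merge the affected region in dev->mem and record the changed range so
 * vhost_commit() can push the new table to the backend.
 */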
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
            (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];

    dev->mem = g_realloc(dev->mem, s);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
        vhost_dev_unassign_memory(dev, start_addr, size);
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;

static bool vhost_section(MemoryRegionSection *section)
    return memory_region_is_ram(section->mr) &&
           !memory_region_is_rom(section->mr);

static void vhost_begin(MemoryListener *listener)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
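/*
 * Commit the accumulated memory changes: re-verify the ring mappings,
 * grow the dirty log if needed before the table update, push the new
 * memory table to the backend, and shrink the log afterwards.
 */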
static void vhost_commit(MemoryListener *listener)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
    hwaddr start_addr = 0;

    if (!dev->memory_changed) {
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        dev->memory_changed = false;
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    dev->memory_changed = false;
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,

    if (!vhost_section(section)) {
    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,

    if (!vhost_section(section)) {
    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
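/*
 * IOMMU notifier: when a guest IOTLB mapping is torn down, forward the
 * invalidation to the vhost backend so its device IOTLB stays coherent.
 */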
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev, iova,
                                                       iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device IOTLB");

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
    iommu = g_malloc0(sizeof(*iommu));
    iommu->n.notify = vhost_iommu_unmap_notify;
    iommu->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr) {
            memory_region_unregister_iommu_notifier(iommu->mr,
            QLIST_REMOVE(iommu, iommu_next);

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
    struct vhost_vring_addr addr = {
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");

static int vhost_dev_set_features(struct vhost_dev *dev,
    uint64_t features = dev->acked_features;
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    r = dev->vhost_ops->vhost_set_features(dev, features);
        VHOST_OPS_DEBUG("vhost_set_features failed");
    return r < 0 ? -errno : 0;
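/*
 * Toggle dirty logging: renegotiate features with VHOST_F_LOG_ALL and
 * reprogram every virtqueue's addresses with the matching log flag,
 * rolling back on failure.
 */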
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
    r = vhost_dev_set_features(dev, enable_log);
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
    vhost_dev_set_features(dev, dev->log_enabled);

static int vhost_migration_log(MemoryListener *listener, int enable)
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
    if (!!enable == dev->log_enabled) {
    dev->log_enabled = enable;
        r = vhost_dev_set_log(dev, false);
        vhost_log_put(dev, false);
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
    dev->log_enabled = enable;

static void vhost_log_global_start(MemoryListener *listener)
    r = vhost_migration_log(listener, true);

static void vhost_log_global_stop(MemoryListener *listener)
    r = vhost_migration_log(listener, false);

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
    /* FIXME: implement */

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
    /* FIXME: implement */

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
    struct vhost_vring_state s = {
        .index = vhost_vq_index,

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
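/*
 * Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the virtio device's DMA address space, look up the matching
 * userspace address and push the resulting entry back to the backend.
 */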
void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
    if (iotlb.target_as != NULL) {
        if (vhost_memory_region_lookup(dev, iotlb.translated_addr,
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        if (dev->vhost_ops->vhost_update_device_iotlb(dev, iova, uaddr,
            error_report("Failed to update device IOTLB");
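/*
 * Bring one virtqueue up in the backend: program the ring size and base,
 * fix up legacy cross-endian rings, map the descriptor/avail/used rings,
 * hand the backend the ring addresses and the kick eventfd, and start in
 * the unmasked state unless the guest notifier mask will be applied later.
 */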
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        goto fail_alloc_desc;
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        goto fail_alloc_avail;
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        goto fail_alloc_used;

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);

    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, which is what legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    int r = event_notifier_init(&vq->masked_notifier, 0);

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    event_notifier_cleanup(&vq->masked_notifier);

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
    event_notifier_cleanup(&vq->masked_notifier);

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than the current number of present memory slots");

    r = hdev->vhost_ops->vhost_set_owner(hdev);
        VHOST_OPS_DEBUG("vhost_set_owner failed");

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
        VHOST_OPS_DEBUG("vhost_get_features failed");

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
            error_report_err(local_err);
            error_free(hdev->migration_blocker);

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);

    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);

void vhost_dev_cleanup(struct vhost_dev *hdev)
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);

    memset(hdev, 0, sizeof(struct vhost_dev));

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
        error_report("binding does not support host notifiers");

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
            error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
    virtio_device_release_ioeventfd(vdev);

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
    virtio_device_release_ioeventfd(vdev);

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
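/*
 * Feature negotiation helpers: vhost_get_features() clears the bits the
 * backend does not offer, vhost_ack_features() records the bits the guest
 * actually acked so they can be set on the backend at start time.
 */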
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  hdev->vq_index + i);

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
            VHOST_OPS_DEBUG("vhost_set_log_base failed");

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update the used ring information so the IOTLB works correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);

    vhost_log_put(hdev, false);
    vhost_virtqueue_stop(hdev,
                         hdev->vq_index + i);
    hdev->started = false;
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             hdev->vq_index + i);

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    vhost_log_put(hdev, true);
    hdev->started = false;

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);