 * Copyright Red Hat, Inc. 2010
 *
 * Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
#include "hw/virtio/vhost.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
static struct vhost_log *vhost_log;
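/* Dirty-log bookkeeping.  vhost_dev_sync_region() below walks the shared log
 * over the intersection of [mfirst, mlast] (the region) and [rfirst, rlast]
 * (the range being synced) and marks the corresponding pages dirty through
 * the memory API.  On the chunk math (assuming the usual 4 KiB VHOST_LOG_PAGE
 * and a 64-bit vhost_log_chunk_t): each chunk is one bitmap word covering
 * VHOST_LOG_CHUNK = 64 * 4 KiB = 256 KiB of guest memory, so bit 'bit' of the
 * chunk at 'addr' describes the page at addr + bit * VHOST_LOG_PAGE. */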
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
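/* Sync dirty bits for one memory section, clipped to [first, last].  Both the
 * guest memory regions in dev->mem and each virtqueue's used ring are covered,
 * since the kernel logs used-ring writes against log_guest_addr separately. */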
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first, hwaddr last)
{
    hwaddr start_addr;
    hwaddr end_addr;
    int i;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }

    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
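/* vhost_dev_unassign_memory() removes [start_addr, start_addr + size) from the
 * region array.  An existing region may be dropped entirely, trimmed at its
 * end, trimmed at its start, or split in two when the removed range sits in
 * its middle; the overlap_* / split counters only feed the sanity asserts. */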
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            continue;
        }

        /* Shrink region: removed range overlaps the end. */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region: removed range overlaps the start. */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
/* Called after unassign, so no regions overlap the given range. */
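/* vhost_dev_assign_memory() appends the new mapping and merges it with any
 * region that is adjacent (or would be contiguous) in both guest-physical and
 * userspace address space, compacting the array as it goes. */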
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
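/* Log size is measured in vhost_log_chunk_t units: large enough to cover the
 * highest guest-physical address in any memory region and in any used ring. */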
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
static struct vhost_log *vhost_log_alloc(uint64_t size)
{
    struct vhost_log *log = g_malloc0(sizeof *log + size * sizeof(*(log->log)));

    log->size = size;
    log->refcnt = 1;

    return log;
}
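/* A single log is shared between vhost devices: vhost_log_get() returns the
 * cached global vhost_log when the requested size matches and allocates a new
 * one otherwise; vhost_log_put() drops a reference and, on the last one, syncs
 * whatever the old log still covers before it is released. */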
static struct vhost_log *vhost_log_get(uint64_t size)
{
    if (!vhost_log || vhost_log->size != size) {
        vhost_log = vhost_log_alloc(size);
    } else {
        ++vhost_log->refcnt;
    }

    return vhost_log;
}
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }
        if (vhost_log == log) {
            vhost_log = NULL;
        }
        g_free(log);
    }
}
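/* Grow or shrink the dirty log: pick up a log of the new size, point the
 * kernel at it with VHOST_SET_LOG_BASE, then drop the reference to the old
 * log (syncing whatever it still covers). */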
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size);
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}
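/* After the memory map changes, make sure no virtqueue ring that overlaps the
 * changed range has moved to a different host address: the kernel side would
 * still be using the old userspace mapping until it is reprogrammed. */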
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
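/* Common add/del handler for RAM sections.  It checks whether the change is a
 * real change to the table (vhost_dev_cmp_memory / vhost_dev_find_reg),
 * updates the region array, and records the affected guest-physical range so
 * vhost_commit() can push a single update to the kernel. */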
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    if (add) {
        vhost_dev_unassign_memory(dev, start_addr, size);
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
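/* Region changes arrive between begin and commit.  vhost_begin() resets the
 * changed-range accumulator; vhost_commit() then verifies the ring mappings
 * and pushes the rebuilt table to the kernel with VHOST_SET_MEM_TABLE, growing
 * the dirty log first when logging is enabled and the new table needs more. */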
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}
static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}
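/* Tell the kernel where one vring lives: the userspace (QEMU) addresses of the
 * descriptor, avail and used rings, plus the guest-physical address of the
 * used ring so used-ring writes can be dirty-logged when enable_log is set. */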
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
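/* Toggle dirty logging on a running device: renegotiate features with
 * VHOST_F_LOG_ALL flipped, then reprogram every vring's addresses so the
 * kernel picks up the new logging flag; on failure, roll back what was done. */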
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;

    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
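/* Invoked from the log_global_start/stop listener callbacks: enables or
 * disables dirty logging on a live device, allocating or releasing the
 * shared log as needed. */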
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}
static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}
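/* Bring one virtqueue up in the kernel: program the ring size and last-avail
 * base, map the descriptor/avail/used rings into QEMU's address space, hand
 * the addresses to vhost, and wire the host notifier eventfd as the kick
 * source. */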
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);
    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
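/* Tear one virtqueue down: read the last-avail index back from the kernel with
 * VHOST_GET_VRING_BASE so virtio can resume from it, then unmap the rings
 * (the used ring is unmapped as written so its pages are marked dirty). */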
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}
static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
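/* Device setup: adopt the backend, become its owner, query its feature bits,
 * initialise the per-queue call notifiers and register the memory listener.
 * A backend without VHOST_F_LOG_ALL cannot be dirty-logged, so migration is
 * blocked for such devices. */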
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, bool force)
{
    uint64_t features;
    int i, r;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;
    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
    };
    hdev->migration_blocker = NULL;
    if (!(hdev->features & (0x1 << VHOST_F_LOG_ALL))) {
        error_setg(&hdev->migration_blocker,
                   "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        migrate_add_blocker(hdev->migration_blocker);
    }
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;

fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    return r;
}
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
}
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
        }
    }
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
        }
    }
}
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}
/* Mask/unmask events from this vq. */
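/* While masked, the kernel's call eventfd is pointed at the queue's internal
 * masked_notifier instead of the guest notifier, so interrupts are latched
 * there rather than injected; unmasking swaps the guest notifier back in. */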
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}
unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}
/* Host notifiers must be enabled at this point. */
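/* Start order matters: negotiate features (with logging if already enabled),
 * push the memory table, start every virtqueue, and only then point the
 * kernel at a dirty log sized for the current memory map. */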
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size);
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE,
                                        hdev->log_size ? &log_base : NULL);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
->log_size
) {
1116 vhost_log_put(hdev
, false);
1120 vhost_virtqueue_stop(hdev
,
1123 hdev
->vq_index
+ i
);
1129 hdev
->started
= false;
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}