/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>
#include "exec-memory.h"
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    /* Sync only the intersection [mfirst,mlast] of the requested range
     * with the region/ring range [rfirst,rlast]. */
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            /* Translate the dirty page's guest-physical address into an
             * offset within the section's memory region. */
            ram_addr = addr + bit * VHOST_LOG_PAGE
                - section->offset_within_address_space
                + section->offset_within_region;
            memory_region_set_dirty(section->mr, ram_addr, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
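
/* A worked example of the log layout assumed above (VHOST_LOG_PAGE,
 * VHOST_LOG_BITS and VHOST_LOG_CHUNK come from vhost.h): with 0x1000-byte
 * pages and a 64-bit vhost_log_chunk_t, one chunk covers
 * 64 * 0x1000 = 256KiB of guest memory.  Bit n of chunk k set means the
 * kernel wrote to the guest-physical page at
 * k * VHOST_LOG_CHUNK + n * VHOST_LOG_PAGE since the chunk was last
 * cleared. */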
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int i;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
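
/* The used rings are scanned in addition to the memory regions: the kernel
 * logs its own writes to a used ring against the guest-physical address
 * passed as log_guest_addr in vhost_virtqueue_set_addr() below, so those
 * dirty bits are tracked per ring rather than per region. */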
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    vhost_sync_dirty_bitmap(dev, section, start_addr, end_addr);
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
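
/* Example of the four cases above, for an existing region covering
 * guest-physical [0x1000, 0x5000) (sizes illustrative only): unassigning
 * [0x1000, 0x5000) or more removes the whole region; unassigning
 * [0x3000, 0x5000) shrinks it to [0x1000, 0x3000); unassigning
 * [0x1000, 0x3000) shifts it to [0x3000, 0x5000); and unassigning
 * [0x2000, 0x3000) from the middle splits it into [0x1000, 0x2000)
 * and [0x3000, 0x5000). */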
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
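
/* Merging requires the candidate and an existing region to be adjacent (or
 * overlapping) in both spaces at once: e.g. an existing region with
 * guest_phys_addr 0x1000, userspace_addr 0x7f0000001000 and size 0x1000
 * (addresses illustrative only) merges with a new mapping at guest address
 * 0x2000, user address 0x7f0000002000, size 0x1000 into one 0x2000-byte
 * region, but not with one whose userspace address does not line up, since
 * a vhost_memory_region describes a single linear guest-physical to
 * user-virtual translation. */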
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
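
/* For instance, with a single 512MB region at guest address 0 and the
 * vhost.h definitions assumed earlier (VHOST_LOG_CHUNK = 0x40000, i.e.
 * 256KiB per 64-bit chunk): last = 0x1fffffff, so the log needs
 * 0x1fffffff / 0x40000 + 1 = 2048 chunks, i.e. 16KB of log for 512MB of
 * guest memory. */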
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r, i;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        vhost_sync_dirty_bitmap(dev, &dev->mem_sections[i],
                                0, (target_phys_addr_t)~0x0ull);
    }
    if (dev->log) {
        g_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}
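
/* The order above matters: the kernel is pointed at the new (zeroed) log
 * with VHOST_SET_LOG_BASE first, and only then are any bits still set in
 * the old buffer flushed via vhost_sync_dirty_bitmap(), which reads
 * dev->log before it is replaced.  That way no dirty page is lost while
 * the buffers are swapped. */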
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }

    if (!dev->started) {
        return;
    }

    if (dev->started) {
        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
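
/* Why grow before and shrink after the table update: the kernel logs a
 * write at guest-physical address A into chunk A / VHOST_LOG_CHUNK, so the
 * log must already span everything the new table may log before
 * VHOST_SET_MEM_TABLE is issued, and the old, larger log can still be
 * written to until that ioctl completes. */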
static bool vhost_section(MemoryRegionSection *section)
{
    return section->address_space == get_system_memory()
        && memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
}

static void vhost_commit(MemoryListener *listener)
{
}
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}
static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
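
/* Note the two address spaces in play: desc/avail/used_user_addr are qemu
 * process virtual addresses (the rings as mapped above), while
 * log_guest_addr is the used ring's guest-physical address, which is what
 * the kernel records in the dirty log when it updates the ring. */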
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    /* Roll the already-updated rings back to the previous logging state. */
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            g_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
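
/* When enabling, the log buffer is sized and registered before
 * vhost_dev_set_log() acks VHOST_F_LOG_ALL, so the kernel never logs into
 * a buffer that is missing or too small; when disabling, logging is turned
 * off first and the buffer freed only afterwards, for the same reason in
 * reverse. */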
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    int r;
    target_phys_addr_t s, l, a;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}
int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10,
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, NULL);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}
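
/* Typical call order for a vhost client such as vhost_net (illustrative,
 * not enforced here): vhost_dev_init() once at setup, vhost_dev_query() to
 * decide whether vhost can be used at all, then per session
 * vhost_dev_enable_notifiers() followed by vhost_dev_start(), and on
 * teardown vhost_dev_stop() followed by vhost_dev_disable_notifiers(),
 * with vhost_dev_cleanup() at the very end. */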
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_guest_notifiers) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_notifiers;
    }

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    hdev->started = true;

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail_mem:
fail_features:
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
    return r;
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    for (i = 0; i < hdev->n_mem_sections; ++i) {
        vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                0, (target_phys_addr_t)~0x0ull);
    }
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}