/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"

static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            /* ffs/ffsll are 1-based; convert to a 0-based bit index. */
            bit -= 1;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

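/*
 * Log layout note (a sketch; the authoritative constants live in the
 * vhost header): the kernel sets one bit per VHOST_LOG_PAGE of guest
 * memory, packed into vhost_log_chunk_t words. Assuming a 4K log page
 * and a 64-bit chunk type, one chunk covers 256K of guest memory, so
 * e.g. a write at guest physical address 0x42000 dirties chunk 1,
 * bit 2. The loop above claims each chunk with an atomic swap-to-zero,
 * then walks its set bits with ffs/ffsll.
 */
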
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, section->size);
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

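/*
 * The cases handled above, sketched with [----] as the existing region
 * and <====> as the range being unassigned:
 *
 *   remove whole:  <===[----]===>    region dropped entirely
 *   shrink tail:   [---<==]=====>    memory_size reduced
 *   shift head:    <====[==>----]    start/size/userspace_addr adjusted
 *   split middle:  [---<===>----]    one region becomes two
 *
 * The overlap_start/overlap_end/overlap_middle and split counters exist
 * only to feed the asserts: because regions in dev->mem never overlap
 * each other, each case can legally occur at most once per call.
 */
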
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

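/*
 * Merging note: two regions are only coalesced when they are adjacent
 * (or overlapping) in *both* address spaces, guest physical and
 * userspace virtual, since a vhost_memory_region encodes one linear
 * GPA-to-HVA mapping. Adjacent guest addresses backed by discontiguous
 * host memory must remain separate table entries.
 */
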
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}

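/*
 * Ordering note: the kernel is pointed at the new, zeroed log before
 * the old one is synced and freed. Any page dirtied around the switch
 * therefore lands in one log or the other and is never lost; this
 * assumes the kernel stops using the old log before the
 * VHOST_SET_LOG_BASE ioctl returns.
 */
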
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;
    uint64_t log_size;
    int r;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }

    if (!dev->started) {
        return;
    }

    if (dev->started) {
        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}

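/*
 * Worked example of the invariant above (numbers illustrative): if a
 * table update removes a high memory region and the required log
 * shrinks from, say, 32 chunks to 16, the 32-chunk log must survive
 * until VHOST_SET_MEM_TABLE returns, because the kernel may still log
 * writes under the old layout; a grow must happen first for the dual
 * reason. The VHOST_LOG_BUFFER slack merely damps realloc churn when
 * region sizes fluctuate.
 */
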
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
}

static void vhost_commit(MemoryListener *listener)
{
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

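/*
 * Note: desc/avail/used are passed as addresses of our own userspace
 * mappings, but log_guest_addr is the guest physical address of the
 * used ring. The kernel itself writes the used ring, so when dirty
 * logging is on it must log those writes against guest memory, not
 * against our mapping of it.
 */
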
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

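/*
 * Rollback note: err_vq deliberately starts at the failing index and
 * walks back to 0, restoring the previous log setting on every ring it
 * touches; re-programming ring i, whose update may or may not have
 * taken effect, is assumed harmless either way.
 */
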
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

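/*
 * Typical backend usage, as a sketch only (error handling elided; the
 * caller-side names here are hypothetical, not part of this file):
 *
 *   struct vhost_dev dev = { .nvqs = nvqs, .vqs = vqs };
 *   vhost_dev_init(&dev, -1, "/dev/vhost-net", false);
 *   vhost_dev_enable_notifiers(&dev, vdev);
 *   vhost_dev_start(&dev, vdev);
 *   ...
 *   vhost_dev_stop(&dev, vdev);
 *   vhost_dev_disable_notifiers(&dev, vdev);
 *   vhost_dev_cleanup(&dev);
 */
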
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:
    hdev->started = false;
    return r;
}

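/*
 * Start ordering recap: features are set first so the kernel and the
 * rings agree on the negotiated layout, the memory table next so ring
 * addresses can be translated, then the rings themselves, and the
 * dirty log last since it only matters once the rings can run.
 */
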
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}