/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;

            bit -= 1;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
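/* Worked example (a sketch; the actual constants are defined alongside
 * vhost_log_chunk_t and may differ): with a 0x1000-byte VHOST_LOG_PAGE
 * and a 64-bit vhost_log_chunk_t, one chunk covers 64 * 0x1000 = 0x40000
 * bytes of guest memory.  A dirty page at guest physical address 0x123000
 * is then bit (0x123000 % 0x40000) / 0x1000 = 0x23 of chunk
 * 0x123000 / 0x40000 = 4.
 */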
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
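/* For illustration, with made-up addresses: given an existing region
 * [0x1000, 0x8fff], unassigning [0x0, 0x9fff] removes it whole;
 * unassigning [0x5000, 0x9fff] shrinks it to [0x1000, 0x4fff];
 * unassigning [0x0, 0x4fff] shifts its start up to [0x5000, 0x8fff];
 * and unassigning [0x3000, 0x5fff] splits it into [0x1000, 0x2fff]
 * and [0x6000, 0x8fff], which is why one spare slot past n is needed.
 */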
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
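/* For illustration, with invented addresses: assigning [0x2000, 0x2fff]
 * while [0x1000, 0x1fff] and [0x3000, 0x3fff] are present (and equally
 * contiguous in userspace) merges all three into one [0x1000, 0x3fff]
 * entry, keeping the table passed to VHOST_SET_MEM_TABLE short.
 */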
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
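/* Example with made-up numbers: if the highest guest address logged is
 * 0xffffffff and VHOST_LOG_CHUNK is 0x40000, the result is
 * 0xffffffff / 0x40000 + 1 = 0x4000 chunks, i.e. a 128KB log at
 * 8 bytes per chunk.
 */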
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}
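/* Note: the in-kernel vhost driver keeps its own mapping of each ring,
 * so a memory update may grow or shrink regions, but a ring that is in
 * use must never move; vhost_commit() asserts that this check passed.
 */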
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    if (add) {
        /* Assign requires a prior unassign, so drop any overlap first. */
        vhost_dev_unassign_memory(dev, start_addr, size);
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}
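/* The ordering above is what makes logging safe: grow the log before
 * VHOST_SET_MEM_TABLE so the kernel never logs past the end of the
 * buffer, and shrink it only after the table update, once no region can
 * produce dirty bits at the high addresses anymore.
 */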
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}
static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
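/* This toggles VHOST_F_LOG_ALL on a running device: vhost_dev_set_log()
 * renegotiates features and re-sends every ring address with the log
 * flag set or cleared, so migration can start and stop dirty tracking
 * without restarting the device.
 */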
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}
static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
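/* The error path above unwinds the four mappings in reverse order of
 * creation (ring, used, avail, desc) by falling through the labels, so
 * a failure at any step releases exactly what was mapped before it.
 */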
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}
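/* The masked notifier set up here acts as a placeholder call fd: while
 * a queue is masked, the kernel signals this internal notifier instead
 * of the guest, and vhost_virtqueue_mask() swaps the real guest
 * notifier back in on unmask.
 */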
static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    close(hdev->control);
    return r;
}
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}
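/* Usage sketch (hypothetical caller, names assumed): after unmasking
 * queue n, poll for an event that raced with the switch of call fds:
 *
 *     vhost_virtqueue_mask(hdev, vdev, n, false);
 *     if (vhost_virtqueue_pending(hdev, n)) {
 *         virtio_irq(virtio_get_queue(vdev, n));
 *     }
 *
 * Checking after the unmask closes the window where the kernel signals
 * the masked notifier just before VHOST_SET_VRING_CALL is switched.
 */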
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}
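/* Lifecycle sketch showing how these entry points compose (error
 * handling elided; the real call sites live in users such as vhost-net):
 *
 *     vhost_dev_init(hdev, devfd, "/dev/vhost-net", force);
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev);
 *     ...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */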