/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
/* For range_get_last */
#include "pci.h"
#include <linux/vhost.h>
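
/* Atomically drain the vhost dirty log over the intersection of the
 * memory-region range [mfirst, mlast] and the requested range
 * [rfirst, rlast], marking each logged page dirty in QEMU's bitmap. */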
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * to roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            /* ffs/ffsll return a 1-based bit index. */
            bit -= 1;
            cpu_physical_memory_set_dirty(addr + bit * VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
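
/* CPUPhysMemoryClient callback: sync dirty pages logged by vhost for
 * every guest memory region and every virtqueue used ring that
 * intersects [start_addr, end_addr]. */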
static int vhost_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                          target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int i;
    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region: supplied range covers the end. */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region: supplied range covers the start. */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
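
/* Example for vhost_dev_unassign_memory() above (illustrative values):
 * unassigning [0x2000, 0x2fff] from an existing region [0x1000, 0x3fff]
 * takes the split branch, leaving [0x1000, 0x1fff] and [0x3000, 0x3fff]. */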
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
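
/* Log size, in chunks, needed to cover all guest memory regions and
 * used rings: one chunk per VHOST_LOG_CHUNK bytes, up to the highest
 * guest physical address that may be logged. */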
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
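
/* Resize the dirty log: point the kernel at a freshly allocated buffer
 * first, then flush whatever the old buffer recorded before freeing
 * it, so no dirty bits are lost across the switch. */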
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;
    if (size) {
        log = qemu_mallocz(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_client_sync_dirty_bitmap(&dev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    qemu_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}
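
/* After a memory update, verify that any virtqueue ring intersecting
 * the changed range still maps to the same host address: rings cannot
 * be relocated while the device is running. */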
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}
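
/* CPUPhysMemoryClient callback: fold a guest memory layout change into
 * dev->mem, push the new table to the kernel, and grow or shrink the
 * dirty log as needed when logging is enabled. */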
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
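
/* Tell the kernel where a virtqueue's rings live in userspace,
 * optionally asking it to log writes to the used ring for migration. */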
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
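
/* Ack the guest-negotiated features to the kernel, adding
 * VHOST_F_LOG_ALL when dirty logging is requested. */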
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
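
/* Toggle dirty logging on all virtqueues, rolling back to the previous
 * logging state if any of them fails. */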
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
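
/* CPUPhysMemoryClient callback: start or stop dirty logging when
 * migration begins or ends. */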
static int vhost_client_migration_log(CPUPhysMemoryClient *client,
                                      int enable)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        qemu_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
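
/* Set up one virtqueue in the kernel: ring size and base index, host
 * mappings of the desc/avail/used rings, and the kick/call eventfds
 * that route guest notifications to vhost and interrupts back. */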
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        goto fail_alloc;
    }
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
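
/* Tear down one virtqueue: detach the host notifier, read back the
 * last-used index so virtio can resume processing in userspace, and
 * unmap the four ring areas. */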
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
    }
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
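
/* Open and initialize a vhost device: take ownership of the fd, query
 * its feature set, and register as a physical memory client so guest
 * memory layout changes are forwarded to the kernel. */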
int vhost_dev_init(struct vhost_dev *hdev, int devfd)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->client.set_memory = vhost_client_set_memory;
    hdev->client.sync_dirty_bitmap = vhost_client_sync_dirty_bitmap;
    hdev->client.migration_log = vhost_client_migration_log;
    hdev->mem = qemu_mallocz(offsetof(struct vhost_memory, regions));
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    cpu_register_phys_memory_client(&hdev->client);
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}
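
/* Release everything vhost_dev_init() acquired. */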
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    cpu_unregister_phys_memory_client(&hdev->client);
    qemu_free(hdev->mem);
    close(hdev->control);
}
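
/* Start vhost processing for the device: negotiate features, push the
 * memory table, initialize every virtqueue, and set up the dirty log
 * if logging is enabled. */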
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_guest_notifiers) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_notifiers;
    }

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            qemu_mallocz(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    hdev->started = true;

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail_mem:
fail_features:
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
    return r;
}
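
/* Stop vhost processing: tear down the virtqueues, pull any remaining
 * dirty bits out of the log, and detach the guest notifiers. */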
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
    }

    hdev->started = false;
    qemu_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}
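
/* A minimal caller-side sketch of the lifecycle above.  The names
 * "dev" and "vdev" and the error handling are hypothetical; the real
 * callers live in the vhost_net backend code.
 *
 *     struct vhost_dev dev;
 *
 *     if (vhost_dev_init(&dev, -1) < 0) {     // opens /dev/vhost-net
 *         return -1;
 *     }
 *     // backend fills in dev.vqs / dev.nvqs before starting
 *     if (vhost_dev_start(&dev, vdev) < 0) {  // vdev: the VirtIODevice
 *         vhost_dev_cleanup(&dev);
 *         return -1;
 *     }
 *     ...
 *     vhost_dev_stop(&dev, vdev);
 *     vhost_dev_cleanup(&dev);
 */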