Add vhost_ops to vhost_dev struct and replace all relevant ioctls
hw/virtio/vhost.c
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"

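/* Propagate pages marked dirty in the vhost log into QEMU's dirty
 * bitmap.  [mfirst, mlast] bounds the listener section and
 * [rfirst, rlast] the tracked region; only their intersection is
 * scanned. */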
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

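/* Sync the dirty log for every tracked memory region and used ring
 * that intersects the given section, clipped to [first, last]. */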
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

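/* Return the log size, in chunks, needed to cover the highest guest
 * physical address of any tracked region or used ring. */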
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

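/* Swap in a zeroed log of the new size: point the backend at the new
 * buffer first, then flush whatever the old log recorded before
 * freeing it, so no dirty page is lost across the switch. */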
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}

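/* Check that no ring was remapped to a different host address by a
 * memory table change; vhost keeps raw pointers to the rings, so a
 * relocated ring cannot be handled and is reported as an error. */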
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

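/* Find the tracked region overlapping the given guest-physical range,
 * or NULL if there is none. */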
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

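/* Decide whether the memory table must change for this mapping: true
 * if no region covers it, the region needs extending, or the
 * guest-physical to userspace translation changed. */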
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

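/* Update the tracked region array for an added or removed section and
 * widen the changed-range bounds; the backend itself is only updated
 * later, in vhost_commit(). */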
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

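/* Flush the accumulated changes to the backend.  With logging enabled
 * the log may only grow before a table update and shrink after one,
 * so the backend never logs beyond the current buffer. */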
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

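/* Program one virtqueue's descriptor, avail and used addresses into
 * the backend, optionally requesting that used-ring writes be logged. */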
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

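/* Switch dirty logging on or off for a running device: renegotiate
 * features, then reprogram every ring address with the new log flag.
 * On failure, restore the state implied by dev->log_enabled. */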
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

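/* React to migration starting or stopping global dirty logging:
 * allocate or free the log buffer and flip logging on the device if
 * it is already running. */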
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

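/* Hand one virtqueue to the backend: program its size and last avail
 * index, map the rings so their userspace addresses can be passed on,
 * and wire the host notifier up as the kick eventfd. */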
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

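/* Take a virtqueue back from the backend: read back the last avail
 * index so QEMU can resume where vhost stopped, then unmap the rings,
 * marking the used ring dirty. */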
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

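/* One-time virtqueue setup: create the masked notifier and install it
 * as the initial call eventfd, so guest interrupts stay routed through
 * it until the queue is started and unmasked. */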
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

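/* Set up a vhost device: initialize the backend with the
 * backend-specific opaque handle, take ownership, query the supported
 * features, initialize the virtqueues and register the memory
 * listener. */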
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   bool force)
{
    uint64_t features;
    int i, r;

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        return -errno;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

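/* Clear from @features any bit listed in @feature_bits (terminated by
 * VHOST_INVALID_FEATURE_BIT) that the backend did not advertise. */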
unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

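/* Record in acked_features each listed bit that the guest accepted. */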
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE, hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}