/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;

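/* Dirty-log layout: the log is an array of vhost_log_chunk_t words shared
 * with the kernel. Each word covers VHOST_LOG_CHUNK bytes of guest memory,
 * one bit per VHOST_LOG_PAGE page. vhost_dev_sync_region() walks the words
 * covering the intersection of the memory section [mfirst, mlast] and the
 * range of interest [rfirst, rlast], atomically claims each non-zero word
 * with atomic_xchg(), and marks the corresponding pages dirty in QEMU's
 * migration bitmap. */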
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

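/* Flush dirty bits for one memory section, clipped to [first, last].
 * Both the regions in the vhost memory table and each virtqueue's used
 * ring are synced: the kernel logs used-ring writes separately through
 * log_guest_addr, so the used rings need their own pass. */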
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
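/* Merging example (illustrative addresses only): with an existing region
 * covering GPA [0x0, 0xffff] at HVA 0x7f0000000000, assigning GPA
 * [0x10000, 0x1ffff] at HVA 0x7f0000010000 is adjacent in both the guest
 * physical and the userspace address space, so the two collapse into one
 * region of twice the size. Adjacency in only one of the two address
 * spaces does not merge. */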
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

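/* A single log buffer (the global vhost_log) is shared by all vhost devices
 * and reference counted: vhost_log_get() reuses it while the requested size
 * matches, otherwise it allocates a fresh buffer, and vhost_log_put() frees
 * it when the last user drops it, optionally syncing the dirty bits it
 * still holds. */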
static struct vhost_log *vhost_log_alloc(uint64_t size)
{
    struct vhost_log *log = g_malloc0(sizeof *log + size * sizeof(*(log->log)));

    log->size = size;
    log->refcnt = 1;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size)
{
    if (!vhost_log || vhost_log->size != size) {
        vhost_log = vhost_log_alloc(size);
    } else {
        ++vhost_log->refcnt;
    }

    return vhost_log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }
        if (vhost_log == log) {
            vhost_log = NULL;
        }
        g_free(log);
    }
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size);
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

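/* After a memory-table change, the rings the kernel is processing must
 * still be mapped at the same host virtual addresses that were handed to
 * VHOST_SET_VRING_ADDR. Remapping would require stopping the device, so a
 * relocated or unmappable ring is reported as an error. */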
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

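/* Fold one section add/remove into the vhost memory table. Sections with
 * dirty logging enabled for anything other than migration (e.g. VGA
 * framebuffer tracking) are treated as removals, since vhost cannot track
 * those writes. The changed GPA range is accumulated for vhost_commit()
 * to push to the kernel. */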
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

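/* Memory listener transactions batch region changes: begin() resets the
 * accumulated change range, region_add()/region_del() widen it, and
 * commit() pushes the new table to the kernel exactly once. The log is
 * grown before the table update and only shrunk after it, so every page
 * the kernel might log is always covered by the current log buffer. */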
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

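/* Toggle VHOST_F_LOG_ALL at runtime: renegotiate features, then reprogram
 * every ring address with the matching log flag. On failure, rings already
 * updated are rolled back to the previous logging state before returning. */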
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ENDIAN, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

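/* Bring one virtqueue under kernel control: program the ring size and the
 * last-avail index, map the descriptor/avail/used rings into QEMU's address
 * space (the kernel gets the resulting host virtual addresses), and hand
 * over the kick eventfd. Mappings are unwound in reverse order on error. */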
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        virtio_legacy_is_cross_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        virtio_legacy_is_cross_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

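/* The eventfd listener callbacks are left empty: vhost receives its
 * kick/call eventfds directly via VHOST_SET_VRING_KICK/CALL rather than
 * through the memory listener. */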
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

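/* One-time device setup: attach the backend, become owner of the vhost fd,
 * query backend features, create a masked-notifier eventfd per virtqueue,
 * and register the memory listener (which replays the current memory map
 * through region_add). Migration is blocked unless the backend can log
 * guest writes via VHOST_F_LOG_ALL. */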
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->migration_blocker = NULL;
    if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
        error_setg(&hdev->migration_blocker,
                   "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        migrate_add_blocker(hdev->migration_blocker);
    }
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;

    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            /* Report the cleanup error, not the original bind error. */
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

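/* Feature negotiation helpers. A backend (e.g. vhost-net) passes the array
 * of feature bits it cares about, terminated by VHOST_INVALID_FEATURE_BIT;
 * vhost_get_features() clears the bits the kernel backend does not support,
 * and vhost_ack_features() records the guest-acked subset for the next
 * VHOST_SET_FEATURES call. */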
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size);
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE,
                                        hdev->log_size ? &log_base : NULL);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}