Revert "xhci: generate a Transfer Event for each Transfer TRB with the IOC bit set"
/* hw/virtio/vhost.c (qemu/ar7.git) */
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
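
/* Sync dirty pages for one section, clipped to [first, last]: once against
 * every vhost memory region (guest memory writes) and once against every
 * virtqueue's used ring, which the backend logs separately. */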
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
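
/* Size the log so its chunks cover the highest guest physical address in
 * any memory region or used ring, in VHOST_LOG_CHUNK units. */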
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
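
/* Swap in a new (zeroed) log buffer.  The kernel is pointed at the new
 * buffer first, then the old one is drained with a final sync before it
 * is freed, so no dirty bits are lost across the resize. */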
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uintptr_t)log;
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}
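
/* Check that a memory change did not move or unmap any virtqueue ring:
 * vhost in the kernel holds the previously-mapped userspace addresses, so
 * a relocation here would leave it accessing stale mappings. */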
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
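
/* Apply one section add/remove to the region table kept in dev->mem and
 * record the changed address range; the table is pushed to the kernel
 * later, in vhost_commit(). */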
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
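
/* Called at the end of a memory transaction: verify ring mappings for the
 * changed range, then push the updated table with VHOST_SET_MEM_TABLE.
 * When logging is enabled, the log must be grown before the table update
 * (new regions may need logging at once) and may only be shrunk after it. */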
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
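
/* Enable or disable dirty logging on the device: update the feature bits,
 * then re-program each ring's addresses with the matching log flag.  On
 * failure, roll the already-updated rings back to the previous state. */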
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
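
/* Migration hook: allocate the log and enable logging when migration
 * starts, tear both down when it stops.  If the device is not running,
 * just record the desired state for vhost_dev_start() to apply. */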
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}
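
/* Wire one virtqueue into the kernel backend: program the ring size and
 * base index, map the descriptor/avail/used rings into qemu's address
 * space, hand their userspace addresses to vhost, and connect the host
 * notifier eventfd as the kick source. */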
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
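
/* Mirror of vhost_virtqueue_start(): read back the last available index
 * so qemu's virtio state stays consistent, then unmap the rings (marking
 * the used ring dirty, since the kernel may have written it). */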
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
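
/* Typical lifecycle, as driven by a device frontend such as vhost_net
 * (a rough sketch; the exact ordering of notifier setup is up to the
 * caller):
 *
 *     vhost_dev_init(hdev, opaque, backend_type, force);
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev);
 *     ...guest runs, the kernel services the rings...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */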
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, bool force)
{
    uint64_t features;
    int i, r;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->migration_blocker = NULL;
    if (!(hdev->features & (0x1 << VHOST_F_LOG_ALL))) {
        error_setg(&hdev->migration_blocker,
                   "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        migrate_add_blocker(hdev->migration_blocker);
    }
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;

fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    return r;
}
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}
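
/* Feature negotiation helpers.  vhost_get_features() masks out of
 * 'features' any bit in 'feature_bits' that the backend did not report;
 * vhost_ack_features() records the guest-acked bits so they are set on
 * the backend at start time.  feature_bits must be terminated with
 * VHOST_INVALID_FEATURE_BIT. */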
unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        log_base = (uintptr_t)hdev->log;
        r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}