/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;
struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid or stale */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    if (!caches) {
        return;
    }

    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = atomic_read(&vq->vring.caches);
    atomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int event_size;
    int64_t len;

    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    atomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
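
/*
 * Worked example (editorial note, not part of the original source): for a
 * legacy queue with num = 256 and the default 4096-byte alignment, the
 * split-ring layout computed above is:
 *
 *     desc:  at vring.desc,              256 * 16 bytes       = 4096
 *     avail: at desc + 4096,             4 + 256 * 2 bytes    =  516
 *     used:  at vring_align(desc + 4612, 4096) = desc + 8192,
 *                                        4 + 256 * 8 bytes    = 2052
 *
 * so the whole ring spans 8192 + 2052 = 10244 bytes of guest memory.
 */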
/* Called within rcu_read_lock().  */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
    assert(caches != NULL);
    return caches;
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    vq->used_idx = val;
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    rcu_read_lock();
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
    rcu_read_unlock();
}
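
/*
 * Editorial note: two suppression mechanisms are chosen between above.
 * With VIRTIO_RING_F_EVENT_IDX negotiated, the device publishes the avail
 * index it wants to be kicked at (vring_set_avail_event); without it, the
 * coarser VRING_USED_F_NO_NOTIFY flag tells the guest not to kick at all.
 */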
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

int virtio_queue_empty(VirtQueue *vq)
{
    bool empty;

    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    rcu_read_lock();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    rcu_read_unlock();
    return empty;
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
}
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
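
/*
 * Usage sketch (illustrative; "device_busy" is a hypothetical predicate,
 * not part of this file): a device that pops an element but cannot
 * process it yet can return it to the ring and retry on the next kick:
 *
 *     VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *     if (elem && device_busy(dev)) {
 *         virtqueue_unpop(vq, elem, 0);
 *         g_free(elem);
 *         return;
 *     }
 */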
/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

/* Called within rcu_read_lock().  */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    rcu_read_lock();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
    rcu_read_unlock();
}
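
/*
 * Usage sketch (illustrative; "my_handle_output" is a placeholder, not
 * part of this file): a typical queue handler pops an element, reads the
 * driver-written out_sg iovecs, fills the device-writable in_sg iovecs,
 * and pushes the element back with the number of bytes written:
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement *elem;
 *
 *         while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *             size_t len = iov_size(elem->in_sg, elem->in_num);
 *             // ... consume elem->out_sg, write up to len bytes into
 *             // elem->in_sg ...
 *             virtqueue_push(vq, elem, len);
 *             g_free(elem);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 */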
/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}
/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    MemoryRegionCache *desc_cache, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    if (unlikely(!vq->vring.desc)) {
        if (in_bytes) {
            *in_bytes = 0;
        }
        if (out_bytes) {
            *out_bytes = 0;
        }
        return;
    }

    rcu_read_lock();
    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto err;
    }

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    rcu_read_unlock();
    return;

err:
    in_total = out_total = 0;
    goto done;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
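
/*
 * Usage sketch (illustrative; "req" and "resp" stand for hypothetical
 * device-specific structures): a device that needs a full request/response
 * pair can check capacity before popping:
 *
 *     if (!virtqueue_avail_bytes(vq, sizeof(resp), sizeof(req))) {
 *         return;   // wait until the guest adds more buffers
 *     }
 *
 * where the first size is what the device will write (in_bytes) and the
 * second is what it will read (out_bytes).
 */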
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}
/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num, 0);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
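
/*
 * Editorial note on the layout produced above: a single allocation holds
 * the caller's structure followed by the four variable-length arrays,
 * each aligned for its element type:
 *
 *     [caller struct (sz bytes)][in_addr][out_addr][in_sg][out_sg]
 *
 * Devices typically embed VirtQueueElement as the first member of their
 * own request struct and pass that struct's size as sz to virtqueue_pop().
 */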
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    rcu_read_lock();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);
    rcu_read_unlock();

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers can not be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    if (unlikely(vdev->broken)) {
        return 0;
    }

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(vdev, elem);
    return elem;
}
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
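
/*
 * Editorial note: the _modern_ accessors below differ from the legacy
 * accessors above only in byte order.  Virtio 1.0 config space is always
 * little-endian (lduw_le_p/stw_le_p and friends), while legacy config
 * space uses the guest's natural endianness (lduw_p/stw_p).
 */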
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        return vq->handle_aio_output(vdev, vq);
    }

    return false;
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->handle_aio_output) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);
    }
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
    vdev->vq[n].handle_output = NULL;
    vdev->vq[n].handle_aio_output = NULL;
}
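
/*
 * Realize-time usage sketch (illustrative; "my_handle_output",
 * MY_DEVICE_ID and struct my_config are placeholders, not part of this
 * file):
 *
 *     static void my_device_realize(DeviceState *dev, Error **errp)
 *     {
 *         VirtIODevice *vdev = VIRTIO_DEVICE(dev);
 *
 *         virtio_init(vdev, "my-device", MY_DEVICE_ID,
 *                     sizeof(struct my_config));
 *         virtio_add_queue(vdev, 128, my_handle_output);
 *     }
 *
 * virtio_add_queue() fills the first unused slot, so queues get indices
 * in the order they are added; virtio_del_queue() frees a slot again.
 */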
static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}
/* Called within rcu_read_lock().  */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
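
/*
 * Editorial note: with EVENT_IDX negotiated, vring_need_event() (from the
 * standard virtio ring headers) evaluates
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * i.e. it returns true exactly when the guest-written used_event index
 * was crossed by the interval (old, new].  Worked example: old = 10,
 * new = 13, used_event = 11 gives 1 < 3, so the guest is notified.
 */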
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with MSI
     * enabled.
     * Next driver release from 2016 fixed this problem, so working around it
     * is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
           k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};
int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field, QJSON *vmdesc)
{
    return virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    int ret;
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    ret = virtio_set_features_nocheck(vdev, val);
    if (!ret && virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
        int i;
        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            if (vdev->vq[i].vring.num != 0) {
                virtio_init_region_cache(vdev, i);
            }
        }
    }
    return ret;
}
2039 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2041 int i, ret;
2042 int32_t config_len;
2043 uint32_t num;
2044 uint32_t features;
2045 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2046 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2047 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2050 * We poison the endianness to ensure it does not get used before
2051 * subsections have been loaded.
2053 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
2055 if (k->load_config) {
2056 ret = k->load_config(qbus->parent, f);
2057 if (ret)
2058 return ret;
2061 qemu_get_8s(f, &vdev->status);
2062 qemu_get_8s(f, &vdev->isr);
2063 qemu_get_be16s(f, &vdev->queue_sel);
2064 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
2065 return -1;
2067 qemu_get_be32s(f, &features);
2070 * Temporarily set guest_features low bits - needed by
2071 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
2072 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
2074 * Note: devices should always test host features in future - don't create
2075 * new dependencies like this.
2077 vdev->guest_features = features;
2079 config_len = qemu_get_be32(f);
2082 * There are cases where the incoming config can be bigger or smaller
2083 * than what we have; so load what we have space for, and skip
2084 * any excess that's in the stream.
2086 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
2088 while (config_len > vdev->config_len) {
2089 qemu_get_byte(f);
2090 config_len--;
2093 num = qemu_get_be32(f);
2095 if (num > VIRTIO_QUEUE_MAX) {
2096 error_report("Invalid number of virtqueues: 0x%x", num);
2097 return -1;
2100 for (i = 0; i < num; i++) {
2101 vdev->vq[i].vring.num = qemu_get_be32(f);
2102 if (k->has_variable_vring_alignment) {
2103 vdev->vq[i].vring.align = qemu_get_be32(f);
2105 vdev->vq[i].vring.desc = qemu_get_be64(f);
2106 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
2107 vdev->vq[i].signalled_used_valid = false;
2108 vdev->vq[i].notification = true;
2110 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
2111 error_report("VQ %d address 0x0 "
2112 "inconsistent with Host index 0x%x",
2113 i, vdev->vq[i].last_avail_idx);
2114 return -1;
2116 if (k->load_queue) {
2117 ret = k->load_queue(qbus->parent, i, f);
2118 if (ret)
2119 return ret;
2123 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2125 if (vdc->load != NULL) {
2126 ret = vdc->load(vdev, f, version_id);
2127 if (ret) {
2128 return ret;
2132 if (vdc->vmsd) {
2133 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
2134 if (ret) {
2135 return ret;
2139 /* Subsections */
2140 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
2141 if (ret) {
2142 return ret;
2145 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
2146 vdev->device_endian = virtio_default_endian();
2149 if (virtio_64bit_features_needed(vdev)) {
2151 * Subsection load filled vdev->guest_features. Run them
2152 * through virtio_set_features to sanity-check them against
2153 * host_features.
2155 uint64_t features64 = vdev->guest_features;
2156 if (virtio_set_features_nocheck(vdev, features64) < 0) {
2157 error_report("Features 0x%" PRIx64 " unsupported. "
2158 "Allowed features: 0x%" PRIx64,
2159 features64, vdev->host_features);
2160 return -1;
2162 } else {
2163 if (virtio_set_features_nocheck(vdev, features) < 0) {
2164 error_report("Features 0x%x unsupported. "
2165 "Allowed features: 0x%" PRIx64,
2166 features, vdev->host_features);
2167 return -1;
2171 rcu_read_lock();
2172 for (i = 0; i < num; i++) {
2173 if (vdev->vq[i].vring.desc) {
2174 uint16_t nheads;
2177 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
2178 * only the region cache needs to be set up. Legacy devices need
2179 * to calculate used and avail ring addresses based on the desc
2180 * address.
2182 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2183 virtio_init_region_cache(vdev, i);
2184 } else {
2185 virtio_queue_update_rings(vdev, i);
2188 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
2189 /* Check it isn't doing strange things with descriptor numbers. */
2190 if (nheads > vdev->vq[i].vring.num) {
2191 error_report("VQ %d size 0x%x Guest index 0x%x "
2192 "inconsistent with Host index 0x%x: delta 0x%x",
2193 i, vdev->vq[i].vring.num,
2194 vring_avail_idx(&vdev->vq[i]),
2195 vdev->vq[i].last_avail_idx, nheads);
2196 return -1;
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
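
            /*
             * Wraparound example: last_avail_idx == 0x0002 with used_idx ==
             * 0xfffe yields (uint16_t)(0x0002 - 0xfffe) == 4, i.e. four
             * elements still in flight even though both indices have wrapped
             * past zero.
             */
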
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }
    rcu_read_unlock();

    return 0;
}
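
/*
 * Usage sketch (illustrative only, not compiled): a transport would call
 * virtio_load() from its own incoming-migration handler once its private
 * state has been restored.  "MyTransportState" and "my_transport_load" are
 * hypothetical names; virtio_load() is the real entry point above.
 */
#if 0
static int my_transport_load(QEMUFile *f, void *opaque, int version_id)
{
    MyTransportState *s = opaque;

    /* ... read transport-specific fields from f first ... */
    return virtio_load(s->vdev, f, version_id);
}
#endif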

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;
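
    /*
     * Ordering note (descriptive; rationale partly inferred): when the VM
     * resumes, the status refresh below runs before the transport's
     * vmstate_change hook; when it stops, the hook runs first and the
     * refresh last, presumably so the hook always observes the device core
     * already in, or still in, the running state.
     */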

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
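
/*
 * Usage sketch (illustrative only, not compiled): a proxy device embeds its
 * backend VirtIODevice and wires it up from instance_init.  "MyProxy",
 * MY_PROXY() and TYPE_MY_VIRTIO_DEVICE are hypothetical names; virtio-pci
 * proxies follow this pattern with virtio_instance_init_common().
 */
#if 0
static void my_proxy_instance_init(Object *obj)
{
    MyProxy *proxy = MY_PROXY(obj);

    virtio_instance_init_common(obj, &proxy->vdev, sizeof(proxy->vdev),
                                TYPE_MY_VIRTIO_DEVICE);
}
#endif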

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
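
/*
 * Usage sketch (illustrative only, not compiled): a device realize method
 * initializes the core and then creates its queues.  "my-dev",
 * VIRTIO_ID_MY_DEV, struct my_config and my_handle_output are hypothetical;
 * virtio_init() above and virtio_add_queue() are the real APIs.
 */
#if 0
static void my_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    virtio_init(vdev, "my-dev", VIRTIO_ID_MY_DEV, sizeof(struct my_config));
    virtio_add_queue(vdev, 128, my_handle_output);
}
#endif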

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
           sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
           sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}
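
/*
 * Worked example: with a 256-entry ring the three sizes above come to
 *   desc:  16 * 256     == 4096 bytes (sizeof(VRingDesc) == 16),
 *   avail:  4 + 2 * 256 ==  516 bytes (flags + idx + ring[]),
 *   used:   4 + 8 * 256 == 2052 bytes (flags + idx + ring[]).
 */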

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
    rcu_read_lock();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
    rcu_read_unlock();
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    rcu_read_lock();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
    rcu_read_unlock();
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    bool progress;

    if (!vq->vring.desc || virtio_queue_empty(vq)) {
        return false;
    }

    progress = virtio_queue_notify_aio_vq(vq);

    /* In case the handler function re-enabled notifications */
    virtio_queue_set_notification(vq, 0);
    return progress;
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}
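
/*
 * The three aio callbacks above implement the event loop's adaptive polling
 * protocol: poll_begin() turns guest->host notifications off because the
 * ring is about to be watched directly, poll() consumes any new requests
 * while keeping notifications suppressed, and poll_end() re-enables them
 * just before the loop goes back to sleeping on the ioeventfd; the caller
 * polls once more afterwards to close the race window.
 */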

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleAIOOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
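
/*
 * Usage sketch (illustrative only, not compiled): dataplane code hands a
 * queue to an IOThread's AioContext and later detaches it by passing NULL.
 * "s->ctx" and "my_handle_vq" are hypothetical names.
 */
#if 0
/* Attach: from now on the queue is serviced in the IOThread. */
virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, my_handle_vq);

/* Detach: fall back to the main-loop host notifier handler. */
virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
#endif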

void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
                                      MemoryRegion *mr, bool assign)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->set_host_notifier_mr) {
        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
    }

    return -1;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
        virtio_notify_config(vdev);
    }

    vdev->broken = true;
}
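
/*
 * Illustrative call (the message text is made up; the pattern mirrors other
 * callers in this file):
 *
 *     virtio_error(vdev, "VQ %u: bogus descriptor index %u", n, idx);
 *
 * This reports the error, marks the device broken, and for VIRTIO 1.0
 * devices also requests a guest-driven reset via
 * VIRTIO_CONFIG_S_NEEDS_RESET.
 */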

static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_init_region_cache(vdev, i);
    }
}

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        vdc->unrealize(dev, NULL);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    memory_listener_register(&vdev->listener, vdev->dma_as);
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;
    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}

static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    memory_listener_unregister(&vdev->listener);
    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}
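
/*
 * Note on the unwind above (descriptive): the notifiers are deassigned
 * inside the same memory-region transaction that assigned them, so all the
 * region updates collapse into a single commit; the fds themselves must
 * stay open until that commit has actually taken effect, which is why
 * virtio_bus_cleanup_host_notifier() runs in a separate pass afterwards.
 */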

int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}

void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_stop_ioeventfd(vbus);
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)