virtio: cleanup VMSTATE_VIRTIO_DEVICE
[qemu/ar7.git] / hw / virtio / virtio.c
blob: d48d1a98a709f87ab9e42ce710c13d5863ef7a4f
/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;
struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    bool use_aio;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}
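/*
 * Illustrative note (not part of the original file): with the layout
 * computed above, a ring of num == 256 and align == VIRTIO_PCI_VRING_ALIGN
 * gives
 *
 *     desc:  256 * sizeof(VRingDesc) = 4096 bytes at vring->desc
 *     avail: 4-byte header + 256 * 2-byte ring entries, directly after desc
 *     used:  starts at the next 4096-byte boundary after avail
 *
 * which matches the split-ring layout in the virtio specification.
 */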
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
                        (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
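/*
 * Usage sketch (illustrative only): device code typically disables guest
 * notifications while draining the queue and re-checks for new buffers
 * after re-enabling them, to close the race with a concurrent guest kick:
 *
 *     do {
 *         virtio_queue_set_notification(vq, 0);
 *         while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *             ... process elem, then push and free it ...
 *         }
 *         virtio_queue_set_notification(vq, 1);
 *     } while (!virtio_queue_empty(vq));
 */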
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
    }
}
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_discard:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
                       unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_discard() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
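/*
 * Usage sketch (illustrative only): a device that pops an element but
 * cannot process it yet can put it back instead of completing it:
 *
 *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *     if (elem && backend_busy) {            // backend_busy is hypothetical
 *         virtqueue_discard(vq, elem, 0);    // refetched by the next pop
 *         g_free(elem);
 *     }
 *
 * virtqueue_rewind() does the same for a batch of @num elements when the
 * individual #VirtQueueElement pointers are no longer available.
 */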
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
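/*
 * Illustrative note: virtqueue_push() is the common single-element case.
 * Devices that complete several requests at once can batch instead:
 *
 *     virtqueue_fill(vq, elem0, len0, 0);
 *     virtqueue_fill(vq, elem1, len1, 1);
 *     virtqueue_flush(vq, 2);      // publishes both used entries at once
 *     virtio_notify(vdev, vq);
 */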
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    hwaddr desc_pa, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_pa, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
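/*
 * Usage sketch (illustrative only): a device can ask up front whether the
 * guest has queued enough buffer space before committing to a pop:
 *
 *     if (!virtqueue_avail_bytes(vq, sizeof(resp), sizeof(req))) {
 *         return;   // wait for the guest to add more buffers
 *     }
 *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *
 * where @in_bytes is space writable by the device and @out_bytes is data
 * readable from the guest (resp and req are hypothetical).
 */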
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}
static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
                                unsigned int *num_sg, unsigned int max_size,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtQueueElement *elem)
{
    virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
                        VIRTQUEUE_MAX_SIZE, 1);
    virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
                        VIRTQUEUE_MAX_SIZE, 0);
}
void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
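/*
 * Illustrative note: the element is a single allocation; in_addr/out_addr
 * and in_sg/out_sg point into trailing storage after the (possibly larger
 * than VirtQueueElement) device-specific struct of size @sz:
 *
 *     [ device struct (sz) | in_addr[] | out_addr[] | in_sg[] | out_sg[] ]
 *
 * so the whole thing is released with a single g_free().
 */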
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;
    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}
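/*
 * Usage sketch (illustrative only): the canonical handle_output loop for
 * a device, with a hypothetical process_request() helper:
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement *elem;
 *
 *         while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *             size_t written = process_request(elem);   // hypothetical
 *             virtqueue_push(vq, elem, written);
 *             virtio_notify(vdev, vq);
 *             g_free(elem);
 *         }
 *     }
 */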
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}

void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}
bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}
static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
                                            VirtIOHandleOutput handle_output,
                                            bool use_aio)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    vdev->vq[i].use_aio = use_aio;

    return &vdev->vq[i];
}

/* Add a virt queue and mark it as AIO.
 * An AIO queue will use the AioContext based event interface instead of the
 * default IOHandler and EventNotifier interface.
 */
VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
                                VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, true);
}

/* Add a normal virt queue (as opposed to the AIO version above). */
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, false);
}
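/*
 * Usage sketch (illustrative only): queues are added at realize time,
 * after virtio_init(); for example a single-queue device might do:
 *
 *     virtio_init(vdev, "my-device", MY_DEVICE_ID, sizeof(my_config));
 *     s->vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 * where MY_DEVICE_ID, my_config and my_handle_output are hypothetical.
 * virtio_add_queue_aio() is chosen instead when the queue is serviced
 * from an AioContext (e.g. a dataplane thread).
 */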
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when the queue is empty (if the guest acknowledged
     * the NOTIFY_ON_EMPTY feature). */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
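/*
 * Worked example (illustrative only) of the EVENT_IDX case above: if the
 * guest wrote used_event = 5 and the device moves used_idx from old = 4
 * to new = 7, then vring_need_event(5, 7, 4) evaluates
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *     (uint16_t)(7 - 5 - 1) = 1       < (uint16_t)(7 - 4)   = 3
 *
 * so the threshold was crossed within this batch and an interrupt is sent.
 */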
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static void virtio_device_put(QEMUFile *f, void *opaque, size_t size)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             */
            vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx;
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier, false,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, false, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    AioContext *ctx = qemu_get_aio_context();
    if (assign && set_handler) {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true,
                                   virtio_queue_host_notifier_read);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true,
                                       virtio_queue_host_notifier_read);
        }
    } else {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true, NULL);
        }
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}
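/*
 * Usage sketch (illustrative only): error paths report the problem and
 * mark the device broken instead of exiting QEMU, e.g.:
 *
 *     if (desc.len % sizeof(VRingDesc)) {
 *         virtio_error(vdev, "Invalid size for indirect buffer table");
 *         return NULL;
 *     }
 *
 * For VERSION_1 devices this also sets NEEDS_RESET and injects a config
 * interrupt so the guest can recover by resetting the device.
 */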
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)