virtio: access ISR atomically
qemu.git: hw/virtio/virtio.c
/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;
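
/*
 * For reference, the split-virtqueue layout in guest memory (virtio 0.9.x
 * legacy and the virtio 1.0 split ring) that the hwaddr fields above point
 * at:
 *
 *   desc:  VRingDesc[num]         16 bytes per descriptor
 *   avail: flags, idx, ring[num]  plus a trailing used_event word when
 *                                 VIRTIO_RING_F_EVENT_IDX is negotiated
 *   used:  flags, idx, ring[num]  plus a trailing avail_event word when
 *                                 VIRTIO_RING_F_EVENT_IDX is negotiated
 */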
struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}
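
/*
 * Worked example of the layout computed above (illustrative numbers, not
 * taken from any particular device): with num = 256 and the default
 * 4096-byte alignment, the descriptor table occupies 256 * 16 = 4096 bytes,
 * so avail = desc + 4096; the avail ring needs
 * offsetof(VRingAvail, ring[256]) = 4 + 2 * 256 = 516 bytes, and
 * vring_align() rounds avail + 516 up to the next 4096-byte boundary to
 * place the used ring.
 */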
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
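
/*
 * With VIRTIO_RING_F_EVENT_IDX, the guest publishes used_event as an extra
 * uint16_t stored immediately after the avail ring entries, which is why the
 * helper above fetches it as avail ring entry number vring.num.
 */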
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
                        (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
}
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}
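
/*
 * The comparison above works in modulo-2^16 arithmetic: it marks
 * signalled_used stale whenever the used index just moved past the value we
 * last signalled on, so virtio_should_notify() falls back to raising an
 * interrupt instead of trusting the cached value.
 */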
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    hwaddr desc_pa, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_pa, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    if (rc < 0) {
        goto err;
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}
/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}
static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
                                unsigned int *num_sg, unsigned int max_size,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtQueueElement *elem)
{
    virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
                        VIRTQUEUE_MAX_SIZE, 1);
    virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
                        VIRTQUEUE_MAX_SIZE, 0);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
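
/*
 * The offsets computed above carve one g_malloc() block into the element
 * header followed by the in_addr[], out_addr[], in_sg[] and out_sg[] arrays,
 * each aligned for its type.  @sz may exceed sizeof(VirtQueueElement) so a
 * device can embed the element at the start of its own request structure and
 * still release everything with a single g_free().
 */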
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;
    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}
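
/*
 * Typical device-side usage of the pop/push pair (an illustrative sketch,
 * not code from this file; handle_request() stands in for a hypothetical
 * per-device helper):
 *
 *     VirtQueueElement *elem;
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         size_t written = handle_request(elem->out_sg, elem->out_num,
 *                                         elem->in_sg, elem->in_num);
 *         virtqueue_push(vq, elem, written);
 *         g_free(elem);
 *     }
 *     virtio_notify(vdev, vq);
 */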
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}
bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}
static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;

    return &vdev->vq[i];
}
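
/*
 * Typical call site (illustrative sketch; my_device_handle_tx is a
 * hypothetical handler and 256 an arbitrary ring size): a device's realize
 * function registers its queues once, e.g.
 *
 *     s->tx_vq = virtio_add_queue(vdev, 256, my_device_handle_tx);
 *
 * Slots are handed out in order, so the first call creates queue 0.
 */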
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}
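
/*
 * Setting ISR bits with atomic_or() rather than a plain read-modify-write is
 * the point of this change: the transport's ISR read path (virtio-pci and
 * virtio-mmio) clears the register with an atomic read-and-clear on the
 * guest's access, so a non-atomic update here could resurrect bits the guest
 * has already consumed.
 */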
void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
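
/*
 * For reference, vring_need_event() (from the shared virtio ring headers)
 * implements the event-index test in modulo-2^16 arithmetic:
 *
 *     (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old)
 *
 * i.e. the guest is notified only if used_event fell inside the window of
 * used entries published since the last notification.
 */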
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
           k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        vmstate_save_state(f, vdc->vmsd, vdev, NULL);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static void virtio_device_put(QEMUFile *f, void *opaque, size_t size)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             */
            vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx;
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}
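
/*
 * Illustrative sizes for a queue with num = 256 (an arbitrary example): the
 * descriptor table spans 16 * 256 = 4096 bytes, the avail ring
 * 4 + 2 * 256 = 516 bytes and the used ring 4 + 8 * 256 = 2052 bytes.  The
 * trailing event-index words, when negotiated, are not counted by these
 * helpers.
 */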
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier, false,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, false, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}

void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r, err;

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier, true,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, true, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    return err;
}
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, true, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
}

void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_stop_ioeventfd(vbus);
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)