/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "standard-headers/linux/virtio_ids.h"
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
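
/*
 * Editorial sketch, not part of the upstream file: this is how the alignment
 * is consumed by virtio_queue_update_rings() below for the legacy split ring.
 * For a hypothetical queue of `num` entries whose descriptor table starts at
 * guest physical address `desc`:
 *
 *     avail = desc + num * sizeof(VRingDesc);
 *     used  = vring_align(avail + offsetof(VRingAvail, ring[num]),
 *                         VIRTIO_PCI_VRING_ALIGN);
 *
 * i.e. only the used ring is rounded up to this boundary.
 */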
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;
struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};
const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio"
};

static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}
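
/*
 * Usage sketch (editorial): virtio_id_to_name(VIRTIO_ID_NET) yields
 * "virtio-net"; an id outside the table or without a name entry trips the
 * assertions above.
 */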
185 /* Called within call_rcu(). */
186 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
188 assert(caches != NULL);
189 address_space_cache_destroy(&caches->desc);
190 address_space_cache_destroy(&caches->avail);
191 address_space_cache_destroy(&caches->used);
192 g_free(caches);
195 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
197 VRingMemoryRegionCaches *caches;
199 caches = qatomic_read(&vq->vring.caches);
200 qatomic_rcu_set(&vq->vring.caches, NULL);
201 if (caches) {
202 call_rcu(caches, virtio_free_region_cache, rcu);
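
/*
 * Editorial note: the caches pointer is published with qatomic_rcu_set() and
 * read under rcu_read_lock() via vring_get_region_caches(); the old structure
 * is only freed after a grace period by call_rcu(), so concurrent readers
 * never dereference freed memory.
 */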
206 static void virtio_init_region_cache(VirtIODevice *vdev, int n)
208 VirtQueue *vq = &vdev->vq[n];
209 VRingMemoryRegionCaches *old = vq->vring.caches;
210 VRingMemoryRegionCaches *new = NULL;
211 hwaddr addr, size;
212 int64_t len;
213 bool packed;
216 addr = vq->vring.desc;
217 if (!addr) {
218 goto out_no_cache;
220 new = g_new0(VRingMemoryRegionCaches, 1);
221 size = virtio_queue_get_desc_size(vdev, n);
222 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
223 true : false;
224 len = address_space_cache_init(&new->desc, vdev->dma_as,
225 addr, size, packed);
226 if (len < size) {
227 virtio_error(vdev, "Cannot map desc");
228 goto err_desc;
231 size = virtio_queue_get_used_size(vdev, n);
232 len = address_space_cache_init(&new->used, vdev->dma_as,
233 vq->vring.used, size, true);
234 if (len < size) {
235 virtio_error(vdev, "Cannot map used");
236 goto err_used;
239 size = virtio_queue_get_avail_size(vdev, n);
240 len = address_space_cache_init(&new->avail, vdev->dma_as,
241 vq->vring.avail, size, false);
242 if (len < size) {
243 virtio_error(vdev, "Cannot map avail");
244 goto err_avail;
247 qatomic_rcu_set(&vq->vring.caches, new);
248 if (old) {
249 call_rcu(old, virtio_free_region_cache, rcu);
251 return;
253 err_avail:
254 address_space_cache_destroy(&new->avail);
255 err_used:
256 address_space_cache_destroy(&new->used);
257 err_desc:
258 address_space_cache_destroy(&new->desc);
259 out_no_cache:
260 g_free(new);
261 virtio_virtqueue_reset_region_cache(vq);
264 /* virt queue functions */
265 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
267 VRing *vring = &vdev->vq[n].vring;
269 if (!vring->num || !vring->desc || !vring->align) {
270 /* not yet setup -> nothing to do */
271 return;
273 vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
274 vring->used = vring_align(vring->avail +
275 offsetof(VRingAvail, ring[vring->num]),
276 vring->align);
277 virtio_init_region_cache(vdev, n);
280 /* Called within rcu_read_lock(). */
281 static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
282 MemoryRegionCache *cache, int i)
284 address_space_read_cached(cache, i * sizeof(VRingDesc),
285 desc, sizeof(VRingDesc));
286 virtio_tswap64s(vdev, &desc->addr);
287 virtio_tswap32s(vdev, &desc->len);
288 virtio_tswap16s(vdev, &desc->flags);
289 virtio_tswap16s(vdev, &desc->next);
292 static void vring_packed_event_read(VirtIODevice *vdev,
293 MemoryRegionCache *cache,
294 VRingPackedDescEvent *e)
296 hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
297 hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
299 e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
300 /* Make sure flags is seen before off_wrap */
301 smp_rmb();
302 e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
303 virtio_tswap16s(vdev, &e->flags);
306 static void vring_packed_off_wrap_write(VirtIODevice *vdev,
307 MemoryRegionCache *cache,
308 uint16_t off_wrap)
310 hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);
312 virtio_stw_phys_cached(vdev, cache, off, off_wrap);
313 address_space_cache_invalidate(cache, off, sizeof(off_wrap));
316 static void vring_packed_flags_write(VirtIODevice *vdev,
317 MemoryRegionCache *cache, uint16_t flags)
319 hwaddr off = offsetof(VRingPackedDescEvent, flags);
321 virtio_stw_phys_cached(vdev, cache, off, flags);
322 address_space_cache_invalidate(cache, off, sizeof(flags));
325 /* Called within rcu_read_lock(). */
326 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
328 return qatomic_rcu_read(&vq->vring.caches);
331 /* Called within rcu_read_lock(). */
332 static inline uint16_t vring_avail_flags(VirtQueue *vq)
334 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
335 hwaddr pa = offsetof(VRingAvail, flags);
337 if (!caches) {
338 return 0;
341 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
344 /* Called within rcu_read_lock(). */
345 static inline uint16_t vring_avail_idx(VirtQueue *vq)
347 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
348 hwaddr pa = offsetof(VRingAvail, idx);
350 if (!caches) {
351 return 0;
354 vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
355 return vq->shadow_avail_idx;
358 /* Called within rcu_read_lock(). */
359 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
361 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
362 hwaddr pa = offsetof(VRingAvail, ring[i]);
364 if (!caches) {
365 return 0;
368 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
371 /* Called within rcu_read_lock(). */
372 static inline uint16_t vring_get_used_event(VirtQueue *vq)
374 return vring_avail_ring(vq, vq->vring.num);
377 /* Called within rcu_read_lock(). */
378 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
379 int i)
381 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
382 hwaddr pa = offsetof(VRingUsed, ring[i]);
384 if (!caches) {
385 return;
388 virtio_tswap32s(vq->vdev, &uelem->id);
389 virtio_tswap32s(vq->vdev, &uelem->len);
390 address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
391 address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
394 /* Called within rcu_read_lock(). */
395 static uint16_t vring_used_idx(VirtQueue *vq)
397 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
398 hwaddr pa = offsetof(VRingUsed, idx);
400 if (!caches) {
401 return 0;
404 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
407 /* Called within rcu_read_lock(). */
408 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
410 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
411 hwaddr pa = offsetof(VRingUsed, idx);
413 if (caches) {
414 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
415 address_space_cache_invalidate(&caches->used, pa, sizeof(val));
418 vq->used_idx = val;
421 /* Called within rcu_read_lock(). */
422 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
424 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
425 VirtIODevice *vdev = vq->vdev;
426 hwaddr pa = offsetof(VRingUsed, flags);
427 uint16_t flags;
429 if (!caches) {
430 return;
433 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
434 virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
435 address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
438 /* Called within rcu_read_lock(). */
439 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
441 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
442 VirtIODevice *vdev = vq->vdev;
443 hwaddr pa = offsetof(VRingUsed, flags);
444 uint16_t flags;
446 if (!caches) {
447 return;
450 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
451 virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
452 address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
455 /* Called within rcu_read_lock(). */
456 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
458 VRingMemoryRegionCaches *caches;
459 hwaddr pa;
460 if (!vq->notification) {
461 return;
464 caches = vring_get_region_caches(vq);
465 if (!caches) {
466 return;
469 pa = offsetof(VRingUsed, ring[vq->vring.num]);
470 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
471 address_space_cache_invalidate(&caches->used, pa, sizeof(val));
474 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
476 RCU_READ_LOCK_GUARD();
478 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
479 vring_set_avail_event(vq, vring_avail_idx(vq));
480 } else if (enable) {
481 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
482 } else {
483 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
485 if (enable) {
486 /* Expose avail event/used flags before caller checks the avail idx. */
487 smp_mb();
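
/*
 * Editorial note: split rings have two notification-suppression mechanisms.
 * With VIRTIO_RING_F_EVENT_IDX the device writes the avail index it wants to
 * be kicked at (vring_set_avail_event() above); without it, it toggles
 * VRING_USED_F_NO_NOTIFY in the used ring flags.
 */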
491 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
493 uint16_t off_wrap;
494 VRingPackedDescEvent e;
495 VRingMemoryRegionCaches *caches;
497 RCU_READ_LOCK_GUARD();
498 caches = vring_get_region_caches(vq);
499 if (!caches) {
500 return;
503 vring_packed_event_read(vq->vdev, &caches->used, &e);
505 if (!enable) {
506 e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
507 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
508 off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
509 vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
511 smp_wmb();
512 e.flags = VRING_PACKED_EVENT_FLAG_DESC;
513 } else {
514 e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
517 vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
518 if (enable) {
519 /* Expose avail event/used flags before caller checks the avail idx. */
520 smp_mb();
524 bool virtio_queue_get_notification(VirtQueue *vq)
526 return vq->notification;
529 void virtio_queue_set_notification(VirtQueue *vq, int enable)
531 vq->notification = enable;
533 if (!vq->vring.desc) {
534 return;
537 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
538 virtio_queue_packed_set_notification(vq, enable);
539 } else {
540 virtio_queue_split_set_notification(vq, enable);
544 int virtio_queue_ready(VirtQueue *vq)
546 return vq->vring.avail != 0;
549 static void vring_packed_desc_read_flags(VirtIODevice *vdev,
550 uint16_t *flags,
551 MemoryRegionCache *cache,
552 int i)
554 hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
556 *flags = virtio_lduw_phys_cached(vdev, cache, off);
559 static void vring_packed_desc_read(VirtIODevice *vdev,
560 VRingPackedDesc *desc,
561 MemoryRegionCache *cache,
562 int i, bool strict_order)
564 hwaddr off = i * sizeof(VRingPackedDesc);
566 vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
568 if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
570 smp_rmb();
573 address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
574 &desc->addr, sizeof(desc->addr));
575 address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
576 &desc->id, sizeof(desc->id));
577 address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
578 &desc->len, sizeof(desc->len));
579 virtio_tswap64s(vdev, &desc->addr);
580 virtio_tswap16s(vdev, &desc->id);
581 virtio_tswap32s(vdev, &desc->len);
584 static void vring_packed_desc_write_data(VirtIODevice *vdev,
585 VRingPackedDesc *desc,
586 MemoryRegionCache *cache,
587 int i)
589 hwaddr off_id = i * sizeof(VRingPackedDesc) +
590 offsetof(VRingPackedDesc, id);
591 hwaddr off_len = i * sizeof(VRingPackedDesc) +
592 offsetof(VRingPackedDesc, len);
594 virtio_tswap32s(vdev, &desc->len);
595 virtio_tswap16s(vdev, &desc->id);
596 address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
597 address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
598 address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
599 address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
602 static void vring_packed_desc_write_flags(VirtIODevice *vdev,
603 VRingPackedDesc *desc,
604 MemoryRegionCache *cache,
605 int i)
607 hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
609 virtio_stw_phys_cached(vdev, cache, off, desc->flags);
610 address_space_cache_invalidate(cache, off, sizeof(desc->flags));
613 static void vring_packed_desc_write(VirtIODevice *vdev,
614 VRingPackedDesc *desc,
615 MemoryRegionCache *cache,
616 int i, bool strict_order)
618 vring_packed_desc_write_data(vdev, desc, cache, i);
619 if (strict_order) {
        /* Make sure data is written before flags. */
621 smp_wmb();
623 vring_packed_desc_write_flags(vdev, desc, cache, i);
626 static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
628 bool avail, used;
630 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
631 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
632 return (avail != used) && (avail == wrap_counter);
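
/*
 * Worked example (editorial): with wrap_counter == true, a descriptor whose
 * flags have AVAIL set and USED clear satisfies avail != used and
 * avail == wrap_counter, so it is available; once the device writes it back
 * with both AVAIL and USED set, avail == used and it is no longer available.
 */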
/* Fetch avail_idx from VQ memory only when we really need to know if
 * the guest has added some buffers.
 * Called within rcu_read_lock().  */
638 static int virtio_queue_empty_rcu(VirtQueue *vq)
640 if (virtio_device_disabled(vq->vdev)) {
641 return 1;
644 if (unlikely(!vq->vring.avail)) {
645 return 1;
648 if (vq->shadow_avail_idx != vq->last_avail_idx) {
649 return 0;
652 return vring_avail_idx(vq) == vq->last_avail_idx;
655 static int virtio_queue_split_empty(VirtQueue *vq)
657 bool empty;
659 if (virtio_device_disabled(vq->vdev)) {
660 return 1;
663 if (unlikely(!vq->vring.avail)) {
664 return 1;
667 if (vq->shadow_avail_idx != vq->last_avail_idx) {
668 return 0;
671 RCU_READ_LOCK_GUARD();
672 empty = vring_avail_idx(vq) == vq->last_avail_idx;
673 return empty;
676 /* Called within rcu_read_lock(). */
677 static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
679 struct VRingPackedDesc desc;
680 VRingMemoryRegionCaches *cache;
682 if (unlikely(!vq->vring.desc)) {
683 return 1;
686 cache = vring_get_region_caches(vq);
687 if (!cache) {
688 return 1;
691 vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
692 vq->last_avail_idx);
694 return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
697 static int virtio_queue_packed_empty(VirtQueue *vq)
699 RCU_READ_LOCK_GUARD();
700 return virtio_queue_packed_empty_rcu(vq);
703 int virtio_queue_empty(VirtQueue *vq)
705 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
706 return virtio_queue_packed_empty(vq);
707 } else {
708 return virtio_queue_split_empty(vq);
712 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
713 unsigned int len)
715 AddressSpace *dma_as = vq->vdev->dma_as;
716 unsigned int offset;
717 int i;
719 offset = 0;
720 for (i = 0; i < elem->in_num; i++) {
721 size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
723 dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
724 elem->in_sg[i].iov_len,
725 DMA_DIRECTION_FROM_DEVICE, size);
727 offset += size;
730 for (i = 0; i < elem->out_num; i++)
731 dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
732 elem->out_sg[i].iov_len,
733 DMA_DIRECTION_TO_DEVICE,
734 elem->out_sg[i].iov_len);
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
746 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
747 unsigned int len)
749 vq->inuse -= elem->ndescs;
750 virtqueue_unmap_sg(vq, elem, len);
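
/*
 * Usage sketch (editorial): a device reset path that still holds a popped
 * element would typically do
 *
 *     virtqueue_detach_element(vq, elem, 0);
 *     g_free(elem);
 *
 * rather than pushing the element back to the guest.
 */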
753 static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
755 vq->last_avail_idx -= num;
758 static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
760 if (vq->last_avail_idx < num) {
761 vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
762 vq->last_avail_wrap_counter ^= 1;
763 } else {
764 vq->last_avail_idx -= num;
/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
776 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
777 unsigned int len)
780 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
781 virtqueue_packed_rewind(vq, 1);
782 } else {
783 virtqueue_split_rewind(vq, 1);
786 virtqueue_detach_element(vq, elem, len);
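
/*
 * Usage sketch (editorial): a device that popped an element but cannot
 * process it right now can hand it back with
 *
 *     virtqueue_unpop(vq, elem, 0);
 *     g_free(elem);
 *
 * and the next virtqueue_pop() will fetch the same element again.
 */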
/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of
 * in-use elements.
 */
801 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
803 if (num > vq->inuse) {
804 return false;
807 vq->inuse -= num;
808 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
809 virtqueue_packed_rewind(vq, num);
810 } else {
811 virtqueue_split_rewind(vq, num);
813 return true;
816 static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
817 unsigned int len, unsigned int idx)
819 VRingUsedElem uelem;
821 if (unlikely(!vq->vring.used)) {
822 return;
825 idx = (idx + vq->used_idx) % vq->vring.num;
827 uelem.id = elem->index;
828 uelem.len = len;
829 vring_used_write(vq, &uelem, idx);
832 static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
833 unsigned int len, unsigned int idx)
835 vq->used_elems[idx].index = elem->index;
836 vq->used_elems[idx].len = len;
837 vq->used_elems[idx].ndescs = elem->ndescs;
840 static void virtqueue_packed_fill_desc(VirtQueue *vq,
841 const VirtQueueElement *elem,
842 unsigned int idx,
843 bool strict_order)
845 uint16_t head;
846 VRingMemoryRegionCaches *caches;
847 VRingPackedDesc desc = {
848 .id = elem->index,
849 .len = elem->len,
851 bool wrap_counter = vq->used_wrap_counter;
853 if (unlikely(!vq->vring.desc)) {
854 return;
857 head = vq->used_idx + idx;
858 if (head >= vq->vring.num) {
859 head -= vq->vring.num;
860 wrap_counter ^= 1;
862 if (wrap_counter) {
863 desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
864 desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
865 } else {
866 desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
867 desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
870 caches = vring_get_region_caches(vq);
871 if (!caches) {
872 return;
875 vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
878 /* Called within rcu_read_lock(). */
879 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
880 unsigned int len, unsigned int idx)
882 trace_virtqueue_fill(vq, elem, len, idx);
884 virtqueue_unmap_sg(vq, elem, len);
886 if (virtio_device_disabled(vq->vdev)) {
887 return;
890 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
891 virtqueue_packed_fill(vq, elem, len, idx);
892 } else {
893 virtqueue_split_fill(vq, elem, len, idx);
897 /* Called within rcu_read_lock(). */
898 static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
900 uint16_t old, new;
902 if (unlikely(!vq->vring.used)) {
903 return;
906 /* Make sure buffer is written before we update index. */
907 smp_wmb();
908 trace_virtqueue_flush(vq, count);
909 old = vq->used_idx;
910 new = old + count;
911 vring_used_idx_set(vq, new);
912 vq->inuse -= count;
913 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
914 vq->signalled_used_valid = false;
917 static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
919 unsigned int i, ndescs = 0;
921 if (unlikely(!vq->vring.desc)) {
922 return;
925 for (i = 1; i < count; i++) {
926 virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
927 ndescs += vq->used_elems[i].ndescs;
929 virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
930 ndescs += vq->used_elems[0].ndescs;
932 vq->inuse -= ndescs;
933 vq->used_idx += ndescs;
934 if (vq->used_idx >= vq->vring.num) {
935 vq->used_idx -= vq->vring.num;
936 vq->used_wrap_counter ^= 1;
937 vq->signalled_used_valid = false;
941 void virtqueue_flush(VirtQueue *vq, unsigned int count)
943 if (virtio_device_disabled(vq->vdev)) {
944 vq->inuse -= count;
945 return;
948 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
949 virtqueue_packed_flush(vq, count);
950 } else {
951 virtqueue_split_flush(vq, count);
955 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
956 unsigned int len)
958 RCU_READ_LOCK_GUARD();
959 virtqueue_fill(vq, elem, len, 0);
960 virtqueue_flush(vq, 1);
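
/*
 * Usage sketch (editorial): after completing a request a device typically
 * returns the element and signals the guest:
 *
 *     virtqueue_push(vq, elem, bytes_written);
 *     virtio_notify(vdev, vq);
 *     g_free(elem);
 */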
963 /* Called within rcu_read_lock(). */
964 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
966 uint16_t num_heads = vring_avail_idx(vq) - idx;
968 /* Check it isn't doing very strange things with descriptor numbers. */
969 if (num_heads > vq->vring.num) {
970 virtio_error(vq->vdev, "Guest moved used index from %u to %u",
971 idx, vq->shadow_avail_idx);
972 return -EINVAL;
974 /* On success, callers read a descriptor at vq->last_avail_idx.
975 * Make sure descriptor read does not bypass avail index read. */
976 if (num_heads) {
977 smp_rmb();
980 return num_heads;
983 /* Called within rcu_read_lock(). */
984 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
985 unsigned int *head)
987 /* Grab the next descriptor number they're advertising, and increment
988 * the index we've seen. */
989 *head = vring_avail_ring(vq, idx % vq->vring.num);
991 /* If their number is silly, that's a fatal mistake. */
992 if (*head >= vq->vring.num) {
993 virtio_error(vq->vdev, "Guest says index %u is available", *head);
994 return false;
997 return true;
1000 enum {
1001 VIRTQUEUE_READ_DESC_ERROR = -1,
1002 VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
1003 VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
1006 static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
1007 MemoryRegionCache *desc_cache,
1008 unsigned int max, unsigned int *next)
1010 /* If this descriptor says it doesn't chain, we're done. */
1011 if (!(desc->flags & VRING_DESC_F_NEXT)) {
1012 return VIRTQUEUE_READ_DESC_DONE;
1015 /* Check they're not leading us off end of descriptors. */
1016 *next = desc->next;
1017 /* Make sure compiler knows to grab that: we don't want it changing! */
1018 smp_wmb();
1020 if (*next >= max) {
1021 virtio_error(vdev, "Desc next is %u", *next);
1022 return VIRTQUEUE_READ_DESC_ERROR;
1025 vring_split_desc_read(vdev, desc, desc_cache, *next);
1026 return VIRTQUEUE_READ_DESC_MORE;
1029 /* Called within rcu_read_lock(). */
1030 static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
1031 unsigned int *in_bytes, unsigned int *out_bytes,
1032 unsigned max_in_bytes, unsigned max_out_bytes,
1033 VRingMemoryRegionCaches *caches)
1035 VirtIODevice *vdev = vq->vdev;
1036 unsigned int max, idx;
1037 unsigned int total_bufs, in_total, out_total;
1038 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1039 int64_t len = 0;
1040 int rc;
1042 idx = vq->last_avail_idx;
1043 total_bufs = in_total = out_total = 0;
1045 max = vq->vring.num;
1047 while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
1048 MemoryRegionCache *desc_cache = &caches->desc;
1049 unsigned int num_bufs;
1050 VRingDesc desc;
1051 unsigned int i;
1053 num_bufs = total_bufs;
1055 if (!virtqueue_get_head(vq, idx++, &i)) {
1056 goto err;
1059 vring_split_desc_read(vdev, &desc, desc_cache, i);
1061 if (desc.flags & VRING_DESC_F_INDIRECT) {
1062 if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1063 virtio_error(vdev, "Invalid size for indirect buffer table");
1064 goto err;
1067 /* If we've got too many, that implies a descriptor loop. */
1068 if (num_bufs >= max) {
1069 virtio_error(vdev, "Looped descriptor");
1070 goto err;
1073 /* loop over the indirect descriptor table */
1074 len = address_space_cache_init(&indirect_desc_cache,
1075 vdev->dma_as,
1076 desc.addr, desc.len, false);
1077 desc_cache = &indirect_desc_cache;
1078 if (len < desc.len) {
1079 virtio_error(vdev, "Cannot map indirect buffer");
1080 goto err;
1083 max = desc.len / sizeof(VRingDesc);
1084 num_bufs = i = 0;
1085 vring_split_desc_read(vdev, &desc, desc_cache, i);
1088 do {
1089 /* If we've got too many, that implies a descriptor loop. */
1090 if (++num_bufs > max) {
1091 virtio_error(vdev, "Looped descriptor");
1092 goto err;
1095 if (desc.flags & VRING_DESC_F_WRITE) {
1096 in_total += desc.len;
1097 } else {
1098 out_total += desc.len;
1100 if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1101 goto done;
1104 rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
1105 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1107 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1108 goto err;
1111 if (desc_cache == &indirect_desc_cache) {
1112 address_space_cache_destroy(&indirect_desc_cache);
1113 total_bufs++;
1114 } else {
1115 total_bufs = num_bufs;
1119 if (rc < 0) {
1120 goto err;
1123 done:
1124 address_space_cache_destroy(&indirect_desc_cache);
1125 if (in_bytes) {
1126 *in_bytes = in_total;
1128 if (out_bytes) {
1129 *out_bytes = out_total;
1131 return;
1133 err:
1134 in_total = out_total = 0;
1135 goto done;
1138 static int virtqueue_packed_read_next_desc(VirtQueue *vq,
1139 VRingPackedDesc *desc,
1140 MemoryRegionCache
1141 *desc_cache,
1142 unsigned int max,
1143 unsigned int *next,
1144 bool indirect)
1146 /* If this descriptor says it doesn't chain, we're done. */
1147 if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
1148 return VIRTQUEUE_READ_DESC_DONE;
1151 ++*next;
1152 if (*next == max) {
1153 if (indirect) {
1154 return VIRTQUEUE_READ_DESC_DONE;
1155 } else {
1156 (*next) -= vq->vring.num;
1160 vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
1161 return VIRTQUEUE_READ_DESC_MORE;
1164 /* Called within rcu_read_lock(). */
1165 static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
1166 unsigned int *in_bytes,
1167 unsigned int *out_bytes,
1168 unsigned max_in_bytes,
1169 unsigned max_out_bytes,
1170 VRingMemoryRegionCaches *caches)
1172 VirtIODevice *vdev = vq->vdev;
1173 unsigned int max, idx;
1174 unsigned int total_bufs, in_total, out_total;
1175 MemoryRegionCache *desc_cache;
1176 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1177 int64_t len = 0;
1178 VRingPackedDesc desc;
1179 bool wrap_counter;
1181 idx = vq->last_avail_idx;
1182 wrap_counter = vq->last_avail_wrap_counter;
1183 total_bufs = in_total = out_total = 0;
1185 max = vq->vring.num;
1187 for (;;) {
1188 unsigned int num_bufs = total_bufs;
1189 unsigned int i = idx;
1190 int rc;
1192 desc_cache = &caches->desc;
1193 vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
1194 if (!is_desc_avail(desc.flags, wrap_counter)) {
1195 break;
1198 if (desc.flags & VRING_DESC_F_INDIRECT) {
1199 if (desc.len % sizeof(VRingPackedDesc)) {
1200 virtio_error(vdev, "Invalid size for indirect buffer table");
1201 goto err;
1204 /* If we've got too many, that implies a descriptor loop. */
1205 if (num_bufs >= max) {
1206 virtio_error(vdev, "Looped descriptor");
1207 goto err;
1210 /* loop over the indirect descriptor table */
1211 len = address_space_cache_init(&indirect_desc_cache,
1212 vdev->dma_as,
1213 desc.addr, desc.len, false);
1214 desc_cache = &indirect_desc_cache;
1215 if (len < desc.len) {
1216 virtio_error(vdev, "Cannot map indirect buffer");
1217 goto err;
1220 max = desc.len / sizeof(VRingPackedDesc);
1221 num_bufs = i = 0;
1222 vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1225 do {
1226 /* If we've got too many, that implies a descriptor loop. */
1227 if (++num_bufs > max) {
1228 virtio_error(vdev, "Looped descriptor");
1229 goto err;
1232 if (desc.flags & VRING_DESC_F_WRITE) {
1233 in_total += desc.len;
1234 } else {
1235 out_total += desc.len;
1237 if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1238 goto done;
1241 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
1242 &i, desc_cache ==
1243 &indirect_desc_cache);
1244 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1246 if (desc_cache == &indirect_desc_cache) {
1247 address_space_cache_destroy(&indirect_desc_cache);
1248 total_bufs++;
1249 idx++;
1250 } else {
1251 idx += num_bufs - total_bufs;
1252 total_bufs = num_bufs;
1255 if (idx >= vq->vring.num) {
1256 idx -= vq->vring.num;
1257 wrap_counter ^= 1;
1261 /* Record the index and wrap counter for a kick we want */
1262 vq->shadow_avail_idx = idx;
1263 vq->shadow_avail_wrap_counter = wrap_counter;
1264 done:
1265 address_space_cache_destroy(&indirect_desc_cache);
1266 if (in_bytes) {
1267 *in_bytes = in_total;
1269 if (out_bytes) {
1270 *out_bytes = out_total;
1272 return;
1274 err:
1275 in_total = out_total = 0;
1276 goto done;
1279 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
1280 unsigned int *out_bytes,
1281 unsigned max_in_bytes, unsigned max_out_bytes)
1283 uint16_t desc_size;
1284 VRingMemoryRegionCaches *caches;
1286 RCU_READ_LOCK_GUARD();
1288 if (unlikely(!vq->vring.desc)) {
1289 goto err;
1292 caches = vring_get_region_caches(vq);
1293 if (!caches) {
1294 goto err;
1297 desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
1298 sizeof(VRingPackedDesc) : sizeof(VRingDesc);
1299 if (caches->desc.len < vq->vring.num * desc_size) {
1300 virtio_error(vq->vdev, "Cannot map descriptor ring");
1301 goto err;
1304 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1305 virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
1306 max_in_bytes, max_out_bytes,
1307 caches);
1308 } else {
1309 virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
1310 max_in_bytes, max_out_bytes,
1311 caches);
1314 return;
1315 err:
1316 if (in_bytes) {
1317 *in_bytes = 0;
1319 if (out_bytes) {
1320 *out_bytes = 0;
1324 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
1325 unsigned int out_bytes)
1327 unsigned int in_total, out_total;
1329 virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
1330 return in_bytes <= in_total && out_bytes <= out_total;
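
/*
 * Usage sketch (editorial): a device that needs `out` bytes of driver-written
 * data and room to return `in` bytes can call
 * virtqueue_avail_bytes(vq, in, out) before popping, so it never pops an
 * element it cannot complete.
 */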
1333 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
1334 hwaddr *addr, struct iovec *iov,
1335 unsigned int max_num_sg, bool is_write,
1336 hwaddr pa, size_t sz)
1338 bool ok = false;
1339 unsigned num_sg = *p_num_sg;
1340 assert(num_sg <= max_num_sg);
1342 if (!sz) {
1343 virtio_error(vdev, "virtio: zero sized buffers are not allowed");
1344 goto out;
1347 while (sz) {
1348 hwaddr len = sz;
1350 if (num_sg == max_num_sg) {
1351 virtio_error(vdev, "virtio: too many write descriptors in "
1352 "indirect table");
1353 goto out;
1356 iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
1357 is_write ?
1358 DMA_DIRECTION_FROM_DEVICE :
1359 DMA_DIRECTION_TO_DEVICE,
1360 MEMTXATTRS_UNSPECIFIED);
1361 if (!iov[num_sg].iov_base) {
1362 virtio_error(vdev, "virtio: bogus descriptor or out of resources");
1363 goto out;
1366 iov[num_sg].iov_len = len;
1367 addr[num_sg] = pa;
1369 sz -= len;
1370 pa += len;
1371 num_sg++;
1373 ok = true;
1375 out:
1376 *p_num_sg = num_sg;
1377 return ok;
/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
1384 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
1385 struct iovec *iov)
1387 unsigned int i;
1389 for (i = 0; i < out_num + in_num; i++) {
1390 int is_write = i >= out_num;
1392 cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
1393 iov++;
1397 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
1398 hwaddr *addr, unsigned int num_sg,
1399 bool is_write)
1401 unsigned int i;
1402 hwaddr len;
1404 for (i = 0; i < num_sg; i++) {
1405 len = sg[i].iov_len;
1406 sg[i].iov_base = dma_memory_map(vdev->dma_as,
1407 addr[i], &len, is_write ?
1408 DMA_DIRECTION_FROM_DEVICE :
1409 DMA_DIRECTION_TO_DEVICE,
1410 MEMTXATTRS_UNSPECIFIED);
1411 if (!sg[i].iov_base) {
1412 error_report("virtio: error trying to map MMIO memory");
1413 exit(1);
1415 if (len != sg[i].iov_len) {
1416 error_report("virtio: unexpected memory split");
1417 exit(1);
1422 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
1424 virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
1425 virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
1426 false);
1429 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
1431 VirtQueueElement *elem;
1432 size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1433 size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1434 size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1435 size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1436 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1437 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1439 assert(sz >= sizeof(VirtQueueElement));
1440 elem = g_malloc(out_sg_end);
1441 trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
1442 elem->out_num = out_num;
1443 elem->in_num = in_num;
1444 elem->in_addr = (void *)elem + in_addr_ofs;
1445 elem->out_addr = (void *)elem + out_addr_ofs;
1446 elem->in_sg = (void *)elem + in_sg_ofs;
1447 elem->out_sg = (void *)elem + out_sg_ofs;
1448 return elem;
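
/*
 * Editorial note: the element header and its four arrays share a single
 * g_malloc() block; in_addr/out_addr and in_sg/out_sg all point into the tail
 * of that block, so one g_free(elem) releases everything.
 */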
1451 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
1453 unsigned int i, head, max;
1454 VRingMemoryRegionCaches *caches;
1455 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1456 MemoryRegionCache *desc_cache;
1457 int64_t len;
1458 VirtIODevice *vdev = vq->vdev;
1459 VirtQueueElement *elem = NULL;
1460 unsigned out_num, in_num, elem_entries;
1461 hwaddr addr[VIRTQUEUE_MAX_SIZE];
1462 struct iovec iov[VIRTQUEUE_MAX_SIZE];
1463 VRingDesc desc;
1464 int rc;
1466 RCU_READ_LOCK_GUARD();
1467 if (virtio_queue_empty_rcu(vq)) {
1468 goto done;
1470 /* Needed after virtio_queue_empty(), see comment in
1471 * virtqueue_num_heads(). */
1472 smp_rmb();
    /* When we start there is neither input nor output. */
1475 out_num = in_num = elem_entries = 0;
1477 max = vq->vring.num;
1479 if (vq->inuse >= vq->vring.num) {
1480 virtio_error(vdev, "Virtqueue size exceeded");
1481 goto done;
1484 if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1485 goto done;
1488 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1489 vring_set_avail_event(vq, vq->last_avail_idx);
1492 i = head;
1494 caches = vring_get_region_caches(vq);
1495 if (!caches) {
1496 virtio_error(vdev, "Region caches not initialized");
1497 goto done;
1500 if (caches->desc.len < max * sizeof(VRingDesc)) {
1501 virtio_error(vdev, "Cannot map descriptor ring");
1502 goto done;
1505 desc_cache = &caches->desc;
1506 vring_split_desc_read(vdev, &desc, desc_cache, i);
1507 if (desc.flags & VRING_DESC_F_INDIRECT) {
1508 if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1509 virtio_error(vdev, "Invalid size for indirect buffer table");
1510 goto done;
1513 /* loop over the indirect descriptor table */
1514 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1515 desc.addr, desc.len, false);
1516 desc_cache = &indirect_desc_cache;
1517 if (len < desc.len) {
1518 virtio_error(vdev, "Cannot map indirect buffer");
1519 goto done;
1522 max = desc.len / sizeof(VRingDesc);
1523 i = 0;
1524 vring_split_desc_read(vdev, &desc, desc_cache, i);
1527 /* Collect all the descriptors */
1528 do {
1529 bool map_ok;
1531 if (desc.flags & VRING_DESC_F_WRITE) {
1532 map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1533 iov + out_num,
1534 VIRTQUEUE_MAX_SIZE - out_num, true,
1535 desc.addr, desc.len);
1536 } else {
1537 if (in_num) {
1538 virtio_error(vdev, "Incorrect order for descriptors");
1539 goto err_undo_map;
1541 map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1542 VIRTQUEUE_MAX_SIZE, false,
1543 desc.addr, desc.len);
1545 if (!map_ok) {
1546 goto err_undo_map;
1549 /* If we've got too many, that implies a descriptor loop. */
1550 if (++elem_entries > max) {
1551 virtio_error(vdev, "Looped descriptor");
1552 goto err_undo_map;
1555 rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
1556 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1558 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1559 goto err_undo_map;
1562 /* Now copy what we have collected and mapped */
1563 elem = virtqueue_alloc_element(sz, out_num, in_num);
1564 elem->index = head;
1565 elem->ndescs = 1;
1566 for (i = 0; i < out_num; i++) {
1567 elem->out_addr[i] = addr[i];
1568 elem->out_sg[i] = iov[i];
1570 for (i = 0; i < in_num; i++) {
1571 elem->in_addr[i] = addr[out_num + i];
1572 elem->in_sg[i] = iov[out_num + i];
1575 vq->inuse++;
1577 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1578 done:
1579 address_space_cache_destroy(&indirect_desc_cache);
1581 return elem;
1583 err_undo_map:
1584 virtqueue_undo_map_desc(out_num, in_num, iov);
1585 goto done;
1588 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
1590 unsigned int i, max;
1591 VRingMemoryRegionCaches *caches;
1592 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1593 MemoryRegionCache *desc_cache;
1594 int64_t len;
1595 VirtIODevice *vdev = vq->vdev;
1596 VirtQueueElement *elem = NULL;
1597 unsigned out_num, in_num, elem_entries;
1598 hwaddr addr[VIRTQUEUE_MAX_SIZE];
1599 struct iovec iov[VIRTQUEUE_MAX_SIZE];
1600 VRingPackedDesc desc;
1601 uint16_t id;
1602 int rc;
1604 RCU_READ_LOCK_GUARD();
1605 if (virtio_queue_packed_empty_rcu(vq)) {
1606 goto done;
    /* When we start there is neither input nor output. */
1610 out_num = in_num = elem_entries = 0;
1612 max = vq->vring.num;
1614 if (vq->inuse >= vq->vring.num) {
1615 virtio_error(vdev, "Virtqueue size exceeded");
1616 goto done;
1619 i = vq->last_avail_idx;
1621 caches = vring_get_region_caches(vq);
1622 if (!caches) {
1623 virtio_error(vdev, "Region caches not initialized");
1624 goto done;
1627 if (caches->desc.len < max * sizeof(VRingDesc)) {
1628 virtio_error(vdev, "Cannot map descriptor ring");
1629 goto done;
1632 desc_cache = &caches->desc;
1633 vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1634 id = desc.id;
1635 if (desc.flags & VRING_DESC_F_INDIRECT) {
1636 if (desc.len % sizeof(VRingPackedDesc)) {
1637 virtio_error(vdev, "Invalid size for indirect buffer table");
1638 goto done;
1641 /* loop over the indirect descriptor table */
1642 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1643 desc.addr, desc.len, false);
1644 desc_cache = &indirect_desc_cache;
1645 if (len < desc.len) {
1646 virtio_error(vdev, "Cannot map indirect buffer");
1647 goto done;
1650 max = desc.len / sizeof(VRingPackedDesc);
1651 i = 0;
1652 vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1655 /* Collect all the descriptors */
1656 do {
1657 bool map_ok;
1659 if (desc.flags & VRING_DESC_F_WRITE) {
1660 map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1661 iov + out_num,
1662 VIRTQUEUE_MAX_SIZE - out_num, true,
1663 desc.addr, desc.len);
1664 } else {
1665 if (in_num) {
1666 virtio_error(vdev, "Incorrect order for descriptors");
1667 goto err_undo_map;
1669 map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1670 VIRTQUEUE_MAX_SIZE, false,
1671 desc.addr, desc.len);
1673 if (!map_ok) {
1674 goto err_undo_map;
1677 /* If we've got too many, that implies a descriptor loop. */
1678 if (++elem_entries > max) {
1679 virtio_error(vdev, "Looped descriptor");
1680 goto err_undo_map;
1683 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1684 desc_cache ==
1685 &indirect_desc_cache);
1686 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1688 /* Now copy what we have collected and mapped */
1689 elem = virtqueue_alloc_element(sz, out_num, in_num);
1690 for (i = 0; i < out_num; i++) {
1691 elem->out_addr[i] = addr[i];
1692 elem->out_sg[i] = iov[i];
1694 for (i = 0; i < in_num; i++) {
1695 elem->in_addr[i] = addr[out_num + i];
1696 elem->in_sg[i] = iov[out_num + i];
1699 elem->index = id;
1700 elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1701 vq->last_avail_idx += elem->ndescs;
1702 vq->inuse += elem->ndescs;
1704 if (vq->last_avail_idx >= vq->vring.num) {
1705 vq->last_avail_idx -= vq->vring.num;
1706 vq->last_avail_wrap_counter ^= 1;
1709 vq->shadow_avail_idx = vq->last_avail_idx;
1710 vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1712 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1713 done:
1714 address_space_cache_destroy(&indirect_desc_cache);
1716 return elem;
1718 err_undo_map:
1719 virtqueue_undo_map_desc(out_num, in_num, iov);
1720 goto done;
1723 void *virtqueue_pop(VirtQueue *vq, size_t sz)
1725 if (virtio_device_disabled(vq->vdev)) {
1726 return NULL;
1729 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1730 return virtqueue_packed_pop(vq, sz);
1731 } else {
1732 return virtqueue_split_pop(vq, sz);
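
/*
 * Usage sketch (editorial): the common device-side pattern in a queue's
 * handle_output callback is a pop/process/push loop:
 *
 *     VirtQueueElement *elem;
 *
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         ... consume elem->out_sg, fill elem->in_sg ...
 *         virtqueue_push(vq, elem, bytes_written);
 *         g_free(elem);
 *     }
 *     virtio_notify(vdev, vq);
 */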
1736 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1738 VRingMemoryRegionCaches *caches;
1739 MemoryRegionCache *desc_cache;
1740 unsigned int dropped = 0;
1741 VirtQueueElement elem = {};
1742 VirtIODevice *vdev = vq->vdev;
1743 VRingPackedDesc desc;
1745 RCU_READ_LOCK_GUARD();
1747 caches = vring_get_region_caches(vq);
1748 if (!caches) {
1749 return 0;
1752 desc_cache = &caches->desc;
1754 virtio_queue_set_notification(vq, 0);
1756 while (vq->inuse < vq->vring.num) {
1757 unsigned int idx = vq->last_avail_idx;
        /*
         * works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory
         */
        vring_packed_desc_read(vdev, &desc, desc_cache,
                               vq->last_avail_idx, true);
1764 if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
1765 break;
1767 elem.index = desc.id;
1768 elem.ndescs = 1;
1769 while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
1770 vq->vring.num, &idx, false)) {
1771 ++elem.ndescs;
        /*
         * immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0.
         */
1777 virtqueue_push(vq, &elem, 0);
1778 dropped++;
1779 vq->last_avail_idx += elem.ndescs;
1780 if (vq->last_avail_idx >= vq->vring.num) {
1781 vq->last_avail_idx -= vq->vring.num;
1782 vq->last_avail_wrap_counter ^= 1;
1786 return dropped;
1789 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
1791 unsigned int dropped = 0;
1792 VirtQueueElement elem = {};
1793 VirtIODevice *vdev = vq->vdev;
1794 bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1796 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
1799 smp_rmb();
1800 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1801 break;
1803 vq->inuse++;
1804 vq->last_avail_idx++;
1805 if (fEventIdx) {
1806 vring_set_avail_event(vq, vq->last_avail_idx);
1808 /* immediately push the element, nothing to unmap
1809 * as both in_num and out_num are set to 0 */
1810 virtqueue_push(vq, &elem, 0);
1811 dropped++;
1814 return dropped;
/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers can not be
 * processed but must be returned to the guest.
 */
1823 unsigned int virtqueue_drop_all(VirtQueue *vq)
1825 struct VirtIODevice *vdev = vq->vdev;
1827 if (virtio_device_disabled(vq->vdev)) {
1828 return 0;
1831 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1832 return virtqueue_packed_drop_all(vq);
1833 } else {
1834 return virtqueue_split_drop_all(vq);
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
1845 typedef struct VirtQueueElementOld {
1846 unsigned int index;
1847 unsigned int out_num;
1848 unsigned int in_num;
1849 hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1850 hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1851 struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1852 struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1853 } VirtQueueElementOld;
1855 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1857 VirtQueueElement *elem;
1858 VirtQueueElementOld data;
1859 int i;
1861 qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
1868 assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1869 assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1871 elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1872 elem->index = data.index;
1874 for (i = 0; i < elem->in_num; i++) {
1875 elem->in_addr[i] = data.in_addr[i];
1878 for (i = 0; i < elem->out_num; i++) {
1879 elem->out_addr[i] = data.out_addr[i];
1882 for (i = 0; i < elem->in_num; i++) {
1883 /* Base is overwritten by virtqueue_map. */
1884 elem->in_sg[i].iov_base = 0;
1885 elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1888 for (i = 0; i < elem->out_num; i++) {
1889 /* Base is overwritten by virtqueue_map. */
1890 elem->out_sg[i].iov_base = 0;
1891 elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1894 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1895 qemu_get_be32s(f, &elem->ndescs);
1898 virtqueue_map(vdev, elem);
1899 return elem;
1902 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
1903 VirtQueueElement *elem)
1905 VirtQueueElementOld data;
1906 int i;
1908 memset(&data, 0, sizeof(data));
1909 data.index = elem->index;
1910 data.in_num = elem->in_num;
1911 data.out_num = elem->out_num;
1913 for (i = 0; i < elem->in_num; i++) {
1914 data.in_addr[i] = elem->in_addr[i];
1917 for (i = 0; i < elem->out_num; i++) {
1918 data.out_addr[i] = elem->out_addr[i];
1921 for (i = 0; i < elem->in_num; i++) {
1922 /* Base is overwritten by virtqueue_map when loading. Do not
1923 * save it, as it would leak the QEMU address space layout. */
1924 data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1927 for (i = 0; i < elem->out_num; i++) {
1928 /* Do not save iov_base as above. */
1929 data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1932 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1933 qemu_put_be32s(f, &elem->ndescs);
1936 qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1939 /* virtio device */
1940 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1942 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1943 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1945 if (virtio_device_disabled(vdev)) {
1946 return;
1949 if (k->notify) {
1950 k->notify(qbus->parent, vector);
1954 void virtio_update_irq(VirtIODevice *vdev)
1956 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1959 static int virtio_validate_features(VirtIODevice *vdev)
1961 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1963 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1964 !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1965 return -EFAULT;
1968 if (k->validate_features) {
1969 return k->validate_features(vdev);
1970 } else {
1971 return 0;
1975 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1977 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1978 trace_virtio_set_status(vdev, val);
1980 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1981 if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1982 val & VIRTIO_CONFIG_S_FEATURES_OK) {
1983 int ret = virtio_validate_features(vdev);
1985 if (ret) {
1986 return ret;
1991 if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
1992 (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
1993 virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
1996 if (k->set_status) {
1997 k->set_status(vdev, val);
1999 vdev->status = val;
2001 return 0;
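
/*
 * Editorial note: per the virtio specification the driver normally walks the
 * status through ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK.  The
 * FEATURES_OK write is where virtio_validate_features() can still reject the
 * negotiated features, and the DRIVER_OK edge is what toggles the started
 * state above.
 */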
2004 static enum virtio_device_endian virtio_default_endian(void)
2006 if (target_words_bigendian()) {
2007 return VIRTIO_DEVICE_ENDIAN_BIG;
2008 } else {
2009 return VIRTIO_DEVICE_ENDIAN_LITTLE;
2013 static enum virtio_device_endian virtio_current_cpu_endian(void)
2015 if (cpu_virtio_is_big_endian(current_cpu)) {
2016 return VIRTIO_DEVICE_ENDIAN_BIG;
2017 } else {
2018 return VIRTIO_DEVICE_ENDIAN_LITTLE;
2022 void virtio_reset(void *opaque)
2024 VirtIODevice *vdev = opaque;
2025 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2026 int i;
2028 virtio_set_status(vdev, 0);
2029 if (current_cpu) {
2030 /* Guest initiated reset */
2031 vdev->device_endian = virtio_current_cpu_endian();
2032 } else {
2033 /* System reset */
2034 vdev->device_endian = virtio_default_endian();
2037 if (k->reset) {
2038 k->reset(vdev);
2041 vdev->start_on_kick = false;
2042 vdev->started = false;
2043 vdev->broken = false;
2044 vdev->guest_features = 0;
2045 vdev->queue_sel = 0;
2046 vdev->status = 0;
2047 vdev->disabled = false;
2048 qatomic_set(&vdev->isr, 0);
2049 vdev->config_vector = VIRTIO_NO_VECTOR;
2050 virtio_notify_vector(vdev, vdev->config_vector);
2052 for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2053 vdev->vq[i].vring.desc = 0;
2054 vdev->vq[i].vring.avail = 0;
2055 vdev->vq[i].vring.used = 0;
2056 vdev->vq[i].last_avail_idx = 0;
2057 vdev->vq[i].shadow_avail_idx = 0;
2058 vdev->vq[i].used_idx = 0;
2059 vdev->vq[i].last_avail_wrap_counter = true;
2060 vdev->vq[i].shadow_avail_wrap_counter = true;
2061 vdev->vq[i].used_wrap_counter = true;
2062 virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2063 vdev->vq[i].signalled_used = 0;
2064 vdev->vq[i].signalled_used_valid = false;
2065 vdev->vq[i].notification = true;
2066 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2067 vdev->vq[i].inuse = 0;
2068 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2072 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
2074 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2075 uint8_t val;
2077 if (addr + sizeof(val) > vdev->config_len) {
2078 return (uint32_t)-1;
2081 k->get_config(vdev, vdev->config);
2083 val = ldub_p(vdev->config + addr);
2084 return val;
2087 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
2089 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2090 uint16_t val;
2092 if (addr + sizeof(val) > vdev->config_len) {
2093 return (uint32_t)-1;
2096 k->get_config(vdev, vdev->config);
2098 val = lduw_p(vdev->config + addr);
2099 return val;
2102 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
2104 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2105 uint32_t val;
2107 if (addr + sizeof(val) > vdev->config_len) {
2108 return (uint32_t)-1;
2111 k->get_config(vdev, vdev->config);
2113 val = ldl_p(vdev->config + addr);
2114 return val;
2117 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2119 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2120 uint8_t val = data;
2122 if (addr + sizeof(val) > vdev->config_len) {
2123 return;
2126 stb_p(vdev->config + addr, val);
2128 if (k->set_config) {
2129 k->set_config(vdev, vdev->config);
2133 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2135 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2136 uint16_t val = data;
2138 if (addr + sizeof(val) > vdev->config_len) {
2139 return;
2142 stw_p(vdev->config + addr, val);
2144 if (k->set_config) {
2145 k->set_config(vdev, vdev->config);
2149 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2151 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2152 uint32_t val = data;
2154 if (addr + sizeof(val) > vdev->config_len) {
2155 return;
2158 stl_p(vdev->config + addr, val);
2160 if (k->set_config) {
2161 k->set_config(vdev, vdev->config);
2165 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
2167 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2168 uint8_t val;
2170 if (addr + sizeof(val) > vdev->config_len) {
2171 return (uint32_t)-1;
2174 k->get_config(vdev, vdev->config);
2176 val = ldub_p(vdev->config + addr);
2177 return val;
2180 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
2182 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2183 uint16_t val;
2185 if (addr + sizeof(val) > vdev->config_len) {
2186 return (uint32_t)-1;
2189 k->get_config(vdev, vdev->config);
2191 val = lduw_le_p(vdev->config + addr);
2192 return val;
2195 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
2197 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2198 uint32_t val;
2200 if (addr + sizeof(val) > vdev->config_len) {
2201 return (uint32_t)-1;
2204 k->get_config(vdev, vdev->config);
2206 val = ldl_le_p(vdev->config + addr);
2207 return val;
2210 void virtio_config_modern_writeb(VirtIODevice *vdev,
2211 uint32_t addr, uint32_t data)
2213 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2214 uint8_t val = data;
2216 if (addr + sizeof(val) > vdev->config_len) {
2217 return;
2220 stb_p(vdev->config + addr, val);
2222 if (k->set_config) {
2223 k->set_config(vdev, vdev->config);
2227 void virtio_config_modern_writew(VirtIODevice *vdev,
2228 uint32_t addr, uint32_t data)
2230 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2231 uint16_t val = data;
2233 if (addr + sizeof(val) > vdev->config_len) {
2234 return;
2237 stw_le_p(vdev->config + addr, val);
2239 if (k->set_config) {
2240 k->set_config(vdev, vdev->config);
2244 void virtio_config_modern_writel(VirtIODevice *vdev,
2245 uint32_t addr, uint32_t data)
2247 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2248 uint32_t val = data;
2250 if (addr + sizeof(val) > vdev->config_len) {
2251 return;
2254 stl_le_p(vdev->config + addr, val);
2256 if (k->set_config) {
2257 k->set_config(vdev, vdev->config);
2261 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2263 if (!vdev->vq[n].vring.num) {
2264 return;
2266 vdev->vq[n].vring.desc = addr;
2267 virtio_queue_update_rings(vdev, n);
2270 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2272 return vdev->vq[n].vring.desc;
2275 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2276 hwaddr avail, hwaddr used)
2278 if (!vdev->vq[n].vring.num) {
2279 return;
2281 vdev->vq[n].vring.desc = desc;
2282 vdev->vq[n].vring.avail = avail;
2283 vdev->vq[n].vring.used = used;
2284 virtio_init_region_cache(vdev, n);
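/*
 * Legacy transports only program the descriptor table address through
 * virtio_queue_set_addr(); the avail and used ring addresses are then
 * derived from it (using the queue size and alignment) by
 * virtio_queue_update_rings().  VIRTIO 1.0 transports instead pass all
 * three addresses explicitly through virtio_queue_set_rings().
 */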
2287 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2289 /* Don't allow guest to flip queue between existent and
2290 * nonexistent states, or to set it to an invalid size.
2292 if (!!num != !!vdev->vq[n].vring.num ||
2293 num > VIRTQUEUE_MAX_SIZE ||
2294 num < 0) {
2295 return;
2297 vdev->vq[n].vring.num = num;
2300 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2302 return QLIST_FIRST(&vdev->vector_queues[vector]);
2305 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2307 return QLIST_NEXT(vq, node);
2310 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2312 return vdev->vq[n].vring.num;
2315 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2317 return vdev->vq[n].vring.num_default;
2320 int virtio_get_num_queues(VirtIODevice *vdev)
2322 int i;
2324 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2325 if (!virtio_queue_get_num(vdev, i)) {
2326 break;
2330 return i;
2333 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2335 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2336 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2338 /* virtio-1 compliant devices cannot change the alignment */
2339 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2340 error_report("tried to modify queue alignment for virtio-1 device");
2341 return;
2343 /* Check that the transport told us it was going to do this
2344 * (so a buggy transport will immediately assert rather than
2345 * silently failing to migrate this state)
2347 assert(k->has_variable_vring_alignment);
2349 if (align) {
2350 vdev->vq[n].vring.align = align;
2351 virtio_queue_update_rings(vdev, n);
2355 static void virtio_queue_notify_vq(VirtQueue *vq)
2357 if (vq->vring.desc && vq->handle_output) {
2358 VirtIODevice *vdev = vq->vdev;
2360 if (unlikely(vdev->broken)) {
2361 return;
2364 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2365 vq->handle_output(vdev, vq);
2367 if (unlikely(vdev->start_on_kick)) {
2368 virtio_set_started(vdev, true);
2373 void virtio_queue_notify(VirtIODevice *vdev, int n)
2375 VirtQueue *vq = &vdev->vq[n];
2377 if (unlikely(!vq->vring.desc || vdev->broken)) {
2378 return;
2381 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2382 if (vq->host_notifier_enabled) {
2383 event_notifier_set(&vq->host_notifier);
2384 } else if (vq->handle_output) {
2385 vq->handle_output(vdev, vq);
2387 if (unlikely(vdev->start_on_kick)) {
2388 virtio_set_started(vdev, true);
2393 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2395 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2396 VIRTIO_NO_VECTOR;
2399 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2401 VirtQueue *vq = &vdev->vq[n];
2403 if (n < VIRTIO_QUEUE_MAX) {
2404 if (vdev->vector_queues &&
2405 vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2406 QLIST_REMOVE(vq, node);
2408 vdev->vq[n].vector = vector;
2409 if (vdev->vector_queues &&
2410 vector != VIRTIO_NO_VECTOR) {
2411 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2416 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2417 VirtIOHandleOutput handle_output)
2419 int i;
2421 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2422 if (vdev->vq[i].vring.num == 0)
2423 break;
2426 if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2427 abort();
2429 vdev->vq[i].vring.num = queue_size;
2430 vdev->vq[i].vring.num_default = queue_size;
2431 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2432 vdev->vq[i].handle_output = handle_output;
2433 vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
2435 return &vdev->vq[i];
2438 void virtio_delete_queue(VirtQueue *vq)
2440 vq->vring.num = 0;
2441 vq->vring.num_default = 0;
2442 vq->handle_output = NULL;
2443 g_free(vq->used_elems);
2444 vq->used_elems = NULL;
2445 virtio_virtqueue_reset_region_cache(vq);
2448 void virtio_del_queue(VirtIODevice *vdev, int n)
2450 if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2451 abort();
2454 virtio_delete_queue(&vdev->vq[n]);
2457 static void virtio_set_isr(VirtIODevice *vdev, int value)
2459 uint8_t old = qatomic_read(&vdev->isr);
2461 /* Do not write ISR if it does not change, so that its cacheline remains
2462 * shared in the common case where the guest does not read it.
2464 if ((old & value) != value) {
2465 qatomic_or(&vdev->isr, value);
2469 /* Called within rcu_read_lock(). */
2470 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2472 uint16_t old, new;
2473 bool v;
2474 /* We need to expose used array entries before checking used event. */
2475 smp_mb();
2476 /* Always notify when the queue is empty, if the guest acknowledged VIRTIO_F_NOTIFY_ON_EMPTY */
2477 if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2478 !vq->inuse && virtio_queue_empty(vq)) {
2479 return true;
2482 if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2483 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2486 v = vq->signalled_used_valid;
2487 vq->signalled_used_valid = true;
2488 old = vq->signalled_used;
2489 new = vq->signalled_used = vq->used_idx;
2490 return !v || vring_need_event(vring_get_used_event(vq), new, old);
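/*
 * For split rings with VIRTIO_RING_F_EVENT_IDX, vring_need_event(event,
 * new, old) signals only when the used_event index written by the driver
 * falls inside the window of entries published since the last
 * notification, i.e. (uint16_t)(new - event - 1) < (uint16_t)(new - old).
 * Illustrative values: old = 10, new = 15, used_event = 12 -> notify;
 * used_event = 20 -> suppress.
 */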
2493 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2494 uint16_t off_wrap, uint16_t new,
2495 uint16_t old)
2497 int off = off_wrap & ~(1 << 15);
2499 if (wrap != off_wrap >> 15) {
2500 off -= vq->vring.num;
2503 return vring_need_event(off, new, old);
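/*
 * In the packed layout the driver publishes its event suppression point
 * as off_wrap: bits 0-14 hold the descriptor offset and bit 15 the
 * expected wrap counter.  If that wrap counter differs from the device's
 * current one, the offset refers to the previous pass over the ring, so
 * it is shifted down by vring.num before the usual vring_need_event()
 * check.
 */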
2506 /* Called within rcu_read_lock(). */
2507 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2509 VRingPackedDescEvent e;
2510 uint16_t old, new;
2511 bool v;
2512 VRingMemoryRegionCaches *caches;
2514 caches = vring_get_region_caches(vq);
2515 if (!caches) {
2516 return false;
2519 vring_packed_event_read(vdev, &caches->avail, &e);
2521 old = vq->signalled_used;
2522 new = vq->signalled_used = vq->used_idx;
2523 v = vq->signalled_used_valid;
2524 vq->signalled_used_valid = true;
2526 if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2527 return false;
2528 } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2529 return true;
2532 return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2533 e.off_wrap, new, old);
2536 /* Called within rcu_read_lock(). */
2537 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2539 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2540 return virtio_packed_should_notify(vdev, vq);
2541 } else {
2542 return virtio_split_should_notify(vdev, vq);
2546 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2548 WITH_RCU_READ_LOCK_GUARD() {
2549 if (!virtio_should_notify(vdev, vq)) {
2550 return;
2554 trace_virtio_notify_irqfd(vdev, vq);
2557 * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2558 * windows drivers included in virtio-win 1.8.0 (circa 2015) are
2559 * incorrectly polling this bit during crashdump and hibernation
2560 * in MSI mode, causing a hang if this bit is never updated.
2561 * Recent releases of Windows do not really shut down, but rather
2562 * log out and hibernate to make the next startup faster. Hence,
2563 * this manifested as a more serious hang during shutdown with MSI enabled.
2565 * The next driver release from 2016 fixed this problem, so working around it
2566 * is not a must, but it's easy to do, so let's do it here.
2568 * Note: it's safe to update ISR from any thread as it was switched
2569 * to an atomic operation.
2571 virtio_set_isr(vq->vdev, 0x1);
2572 event_notifier_set(&vq->guest_notifier);
2575 static void virtio_irq(VirtQueue *vq)
2577 virtio_set_isr(vq->vdev, 0x1);
2578 virtio_notify_vector(vq->vdev, vq->vector);
2581 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2583 WITH_RCU_READ_LOCK_GUARD() {
2584 if (!virtio_should_notify(vdev, vq)) {
2585 return;
2589 trace_virtio_notify(vdev, vq);
2590 virtio_irq(vq);
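/*
 * virtio_notify_config() raises ISR value 0x3, i.e. both bit 0 (queue
 * interrupt) and bit 1 (configuration change).  Bumping the generation
 * counter lets VIRTIO 1.0 drivers detect that the config space changed
 * while they were reading it.
 */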
2593 void virtio_notify_config(VirtIODevice *vdev)
2595 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2596 return;
2598 virtio_set_isr(vdev, 0x3);
2599 vdev->generation++;
2600 virtio_notify_vector(vdev, vdev->config_vector);
2603 static bool virtio_device_endian_needed(void *opaque)
2605 VirtIODevice *vdev = opaque;
2607 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2608 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2609 return vdev->device_endian != virtio_default_endian();
2611 /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2612 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2615 static bool virtio_64bit_features_needed(void *opaque)
2617 VirtIODevice *vdev = opaque;
2619 return (vdev->host_features >> 32) != 0;
2622 static bool virtio_virtqueue_needed(void *opaque)
2624 VirtIODevice *vdev = opaque;
2626 return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2629 static bool virtio_packed_virtqueue_needed(void *opaque)
2631 VirtIODevice *vdev = opaque;
2633 return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2636 static bool virtio_ringsize_needed(void *opaque)
2638 VirtIODevice *vdev = opaque;
2639 int i;
2641 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2642 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2643 return true;
2646 return false;
2649 static bool virtio_extra_state_needed(void *opaque)
2651 VirtIODevice *vdev = opaque;
2652 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2653 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2655 return k->has_extra_state &&
2656 k->has_extra_state(qbus->parent);
2659 static bool virtio_broken_needed(void *opaque)
2661 VirtIODevice *vdev = opaque;
2663 return vdev->broken;
2666 static bool virtio_started_needed(void *opaque)
2668 VirtIODevice *vdev = opaque;
2670 return vdev->started;
2673 static bool virtio_disabled_needed(void *opaque)
2675 VirtIODevice *vdev = opaque;
2677 return vdev->disabled;
2680 static const VMStateDescription vmstate_virtqueue = {
2681 .name = "virtqueue_state",
2682 .version_id = 1,
2683 .minimum_version_id = 1,
2684 .fields = (VMStateField[]) {
2685 VMSTATE_UINT64(vring.avail, struct VirtQueue),
2686 VMSTATE_UINT64(vring.used, struct VirtQueue),
2687 VMSTATE_END_OF_LIST()
2691 static const VMStateDescription vmstate_packed_virtqueue = {
2692 .name = "packed_virtqueue_state",
2693 .version_id = 1,
2694 .minimum_version_id = 1,
2695 .fields = (VMStateField[]) {
2696 VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2697 VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2698 VMSTATE_UINT16(used_idx, struct VirtQueue),
2699 VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2700 VMSTATE_UINT32(inuse, struct VirtQueue),
2701 VMSTATE_END_OF_LIST()
2705 static const VMStateDescription vmstate_virtio_virtqueues = {
2706 .name = "virtio/virtqueues",
2707 .version_id = 1,
2708 .minimum_version_id = 1,
2709 .needed = &virtio_virtqueue_needed,
2710 .fields = (VMStateField[]) {
2711 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2712 VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2713 VMSTATE_END_OF_LIST()
2717 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2718 .name = "virtio/packed_virtqueues",
2719 .version_id = 1,
2720 .minimum_version_id = 1,
2721 .needed = &virtio_packed_virtqueue_needed,
2722 .fields = (VMStateField[]) {
2723 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2724 VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2725 VMSTATE_END_OF_LIST()
2729 static const VMStateDescription vmstate_ringsize = {
2730 .name = "ringsize_state",
2731 .version_id = 1,
2732 .minimum_version_id = 1,
2733 .fields = (VMStateField[]) {
2734 VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2735 VMSTATE_END_OF_LIST()
2739 static const VMStateDescription vmstate_virtio_ringsize = {
2740 .name = "virtio/ringsize",
2741 .version_id = 1,
2742 .minimum_version_id = 1,
2743 .needed = &virtio_ringsize_needed,
2744 .fields = (VMStateField[]) {
2745 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2746 VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2747 VMSTATE_END_OF_LIST()
2751 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2752 const VMStateField *field)
2754 VirtIODevice *vdev = pv;
2755 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2756 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2758 if (!k->load_extra_state) {
2759 return -1;
2760 } else {
2761 return k->load_extra_state(qbus->parent, f);
2765 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2766 const VMStateField *field, JSONWriter *vmdesc)
2768 VirtIODevice *vdev = pv;
2769 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2770 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2772 k->save_extra_state(qbus->parent, f);
2773 return 0;
2776 static const VMStateInfo vmstate_info_extra_state = {
2777 .name = "virtqueue_extra_state",
2778 .get = get_extra_state,
2779 .put = put_extra_state,
2782 static const VMStateDescription vmstate_virtio_extra_state = {
2783 .name = "virtio/extra_state",
2784 .version_id = 1,
2785 .minimum_version_id = 1,
2786 .needed = &virtio_extra_state_needed,
2787 .fields = (VMStateField[]) {
2789 .name = "extra_state",
2790 .version_id = 0,
2791 .field_exists = NULL,
2792 .size = 0,
2793 .info = &vmstate_info_extra_state,
2794 .flags = VMS_SINGLE,
2795 .offset = 0,
2797 VMSTATE_END_OF_LIST()
2801 static const VMStateDescription vmstate_virtio_device_endian = {
2802 .name = "virtio/device_endian",
2803 .version_id = 1,
2804 .minimum_version_id = 1,
2805 .needed = &virtio_device_endian_needed,
2806 .fields = (VMStateField[]) {
2807 VMSTATE_UINT8(device_endian, VirtIODevice),
2808 VMSTATE_END_OF_LIST()
2812 static const VMStateDescription vmstate_virtio_64bit_features = {
2813 .name = "virtio/64bit_features",
2814 .version_id = 1,
2815 .minimum_version_id = 1,
2816 .needed = &virtio_64bit_features_needed,
2817 .fields = (VMStateField[]) {
2818 VMSTATE_UINT64(guest_features, VirtIODevice),
2819 VMSTATE_END_OF_LIST()
2823 static const VMStateDescription vmstate_virtio_broken = {
2824 .name = "virtio/broken",
2825 .version_id = 1,
2826 .minimum_version_id = 1,
2827 .needed = &virtio_broken_needed,
2828 .fields = (VMStateField[]) {
2829 VMSTATE_BOOL(broken, VirtIODevice),
2830 VMSTATE_END_OF_LIST()
2834 static const VMStateDescription vmstate_virtio_started = {
2835 .name = "virtio/started",
2836 .version_id = 1,
2837 .minimum_version_id = 1,
2838 .needed = &virtio_started_needed,
2839 .fields = (VMStateField[]) {
2840 VMSTATE_BOOL(started, VirtIODevice),
2841 VMSTATE_END_OF_LIST()
2845 static const VMStateDescription vmstate_virtio_disabled = {
2846 .name = "virtio/disabled",
2847 .version_id = 1,
2848 .minimum_version_id = 1,
2849 .needed = &virtio_disabled_needed,
2850 .fields = (VMStateField[]) {
2851 VMSTATE_BOOL(disabled, VirtIODevice),
2852 VMSTATE_END_OF_LIST()
2856 static const VMStateDescription vmstate_virtio = {
2857 .name = "virtio",
2858 .version_id = 1,
2859 .minimum_version_id = 1,
2860 .fields = (VMStateField[]) {
2861 VMSTATE_END_OF_LIST()
2863 .subsections = (const VMStateDescription*[]) {
2864 &vmstate_virtio_device_endian,
2865 &vmstate_virtio_64bit_features,
2866 &vmstate_virtio_virtqueues,
2867 &vmstate_virtio_ringsize,
2868 &vmstate_virtio_broken,
2869 &vmstate_virtio_extra_state,
2870 &vmstate_virtio_started,
2871 &vmstate_virtio_packed_virtqueues,
2872 &vmstate_virtio_disabled,
2873 NULL
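/*
 * The main migration stream written by virtio_save() is, in order:
 * transport config (save_config), status, isr, queue_sel, the low 32
 * guest feature bits, config_len plus the config space, the number of
 * in-use virtqueues, then per queue: num, optional align, the desc
 * address and last_avail_idx (plus transport per-queue state).
 * Device-specific state and the vmstate_virtio subsections above are
 * appended at the end.
 */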
2877 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
2879 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2880 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2881 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2882 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
2883 int i;
2885 if (k->save_config) {
2886 k->save_config(qbus->parent, f);
2889 qemu_put_8s(f, &vdev->status);
2890 qemu_put_8s(f, &vdev->isr);
2891 qemu_put_be16s(f, &vdev->queue_sel);
2892 qemu_put_be32s(f, &guest_features_lo);
2893 qemu_put_be32(f, vdev->config_len);
2894 qemu_put_buffer(f, vdev->config, vdev->config_len);
2896 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2897 if (vdev->vq[i].vring.num == 0)
2898 break;
2901 qemu_put_be32(f, i);
2903 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2904 if (vdev->vq[i].vring.num == 0)
2905 break;
2907 qemu_put_be32(f, vdev->vq[i].vring.num);
2908 if (k->has_variable_vring_alignment) {
2909 qemu_put_be32(f, vdev->vq[i].vring.align);
2912 * Save desc now; the rest of the ring addresses are saved in
2913 * subsections for VIRTIO-1 devices.
2915 qemu_put_be64(f, vdev->vq[i].vring.desc);
2916 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
2917 if (k->save_queue) {
2918 k->save_queue(qbus->parent, i, f);
2922 if (vdc->save != NULL) {
2923 vdc->save(vdev, f);
2926 if (vdc->vmsd) {
2927 int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2928 if (ret) {
2929 return ret;
2933 /* Subsections */
2934 return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
2937 /* A wrapper for use as a VMState .put function */
2938 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
2939 const VMStateField *field, JSONWriter *vmdesc)
2941 return virtio_save(VIRTIO_DEVICE(opaque), f);
2944 /* A wrapper for use as a VMState .get function */
2945 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2946 const VMStateField *field)
2948 VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2949 DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2951 return virtio_load(vdev, f, dc->vmsd->version_id);
2954 const VMStateInfo virtio_vmstate_info = {
2955 .name = "virtio",
2956 .get = virtio_device_get,
2957 .put = virtio_device_put,
2960 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2962 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2963 bool bad = (val & ~(vdev->host_features)) != 0;
2965 val &= vdev->host_features;
2966 if (k->set_features) {
2967 k->set_features(vdev, val);
2969 vdev->guest_features = val;
2970 return bad ? -1 : 0;
2973 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2975 int ret;
2977 * The driver must not attempt to set features after feature negotiation
2978 * has finished.
2980 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2981 return -EINVAL;
2983 ret = virtio_set_features_nocheck(vdev, val);
2984 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2985 /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
2986 int i;
2987 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2988 if (vdev->vq[i].vring.num != 0) {
2989 virtio_init_region_cache(vdev, i);
2993 if (!ret) {
2994 if (!virtio_device_started(vdev, vdev->status) &&
2995 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2996 vdev->start_on_kick = true;
2999 return ret;
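/*
 * Transports call virtio_set_features() while the driver is still
 * negotiating, before it sets VIRTIO_CONFIG_S_FEATURES_OK; later writes
 * are rejected with -EINVAL above.  A minimal, hypothetical caller:
 *
 *     uint64_t requested = ...;   // feature bits written by the guest
 *     if (virtio_set_features(vdev, requested) < 0) {
 *         // bits were not offered by the device, or negotiation had
 *         // already completed
 *     }
 */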
3002 size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
3003 uint64_t host_features)
3005 size_t config_size = 0;
3006 int i;
3008 for (i = 0; feature_sizes[i].flags != 0; i++) {
3009 if (host_features & feature_sizes[i].flags) {
3010 config_size = MAX(feature_sizes[i].end, config_size);
3014 return config_size;
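/*
 * Illustrative example: with feature_sizes = {{F_A, end_a}, {F_B, end_b},
 * {0, 0}} and only F_B set in host_features, the returned size is end_b,
 * i.e. the largest "end" offset among the enabled features, so the config
 * space always covers every field the guest may access.
 */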
3017 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
3019 int i, ret;
3020 int32_t config_len;
3021 uint32_t num;
3022 uint32_t features;
3023 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3024 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3025 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3028 * We poison the endianness to ensure it does not get used before
3029 * subsections have been loaded.
3031 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3033 if (k->load_config) {
3034 ret = k->load_config(qbus->parent, f);
3035 if (ret)
3036 return ret;
3039 qemu_get_8s(f, &vdev->status);
3040 qemu_get_8s(f, &vdev->isr);
3041 qemu_get_be16s(f, &vdev->queue_sel);
3042 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3043 return -1;
3045 qemu_get_be32s(f, &features);
3048 * Temporarily set guest_features low bits - needed by
3049 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
3050 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3052 * Note: devices should always test host features in future - don't create
3053 * new dependencies like this.
3055 vdev->guest_features = features;
3057 config_len = qemu_get_be32(f);
3060 * There are cases where the incoming config can be bigger or smaller
3061 * than what we have; so load what we have space for, and skip
3062 * any excess that's in the stream.
3064 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3066 while (config_len > vdev->config_len) {
3067 qemu_get_byte(f);
3068 config_len--;
3071 num = qemu_get_be32(f);
3073 if (num > VIRTIO_QUEUE_MAX) {
3074 error_report("Invalid number of virtqueues: 0x%x", num);
3075 return -1;
3078 for (i = 0; i < num; i++) {
3079 vdev->vq[i].vring.num = qemu_get_be32(f);
3080 if (k->has_variable_vring_alignment) {
3081 vdev->vq[i].vring.align = qemu_get_be32(f);
3083 vdev->vq[i].vring.desc = qemu_get_be64(f);
3084 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3085 vdev->vq[i].signalled_used_valid = false;
3086 vdev->vq[i].notification = true;
3088 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3089 error_report("VQ %d address 0x0 "
3090 "inconsistent with Host index 0x%x",
3091 i, vdev->vq[i].last_avail_idx);
3092 return -1;
3094 if (k->load_queue) {
3095 ret = k->load_queue(qbus->parent, i, f);
3096 if (ret)
3097 return ret;
3101 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3103 if (vdc->load != NULL) {
3104 ret = vdc->load(vdev, f, version_id);
3105 if (ret) {
3106 return ret;
3110 if (vdc->vmsd) {
3111 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3112 if (ret) {
3113 return ret;
3117 /* Subsections */
3118 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3119 if (ret) {
3120 return ret;
3123 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3124 vdev->device_endian = virtio_default_endian();
3127 if (virtio_64bit_features_needed(vdev)) {
3129 * Subsection load filled vdev->guest_features. Run them
3130 * through virtio_set_features to sanity-check them against
3131 * host_features.
3133 uint64_t features64 = vdev->guest_features;
3134 if (virtio_set_features_nocheck(vdev, features64) < 0) {
3135 error_report("Features 0x%" PRIx64 " unsupported. "
3136 "Allowed features: 0x%" PRIx64,
3137 features64, vdev->host_features);
3138 return -1;
3140 } else {
3141 if (virtio_set_features_nocheck(vdev, features) < 0) {
3142 error_report("Features 0x%x unsupported. "
3143 "Allowed features: 0x%" PRIx64,
3144 features, vdev->host_features);
3145 return -1;
3149 if (!virtio_device_started(vdev, vdev->status) &&
3150 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3151 vdev->start_on_kick = true;
3154 RCU_READ_LOCK_GUARD();
3155 for (i = 0; i < num; i++) {
3156 if (vdev->vq[i].vring.desc) {
3157 uint16_t nheads;
3160 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3161 * only the region cache needs to be set up. Legacy devices need
3162 * to calculate used and avail ring addresses based on the desc
3163 * address.
3165 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3166 virtio_init_region_cache(vdev, i);
3167 } else {
3168 virtio_queue_update_rings(vdev, i);
3171 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3172 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3173 vdev->vq[i].shadow_avail_wrap_counter =
3174 vdev->vq[i].last_avail_wrap_counter;
3175 continue;
3178 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3179 /* Check it isn't doing strange things with descriptor numbers. */
3180 if (nheads > vdev->vq[i].vring.num) {
3181 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3182 "inconsistent with Host index 0x%x: delta 0x%x",
3183 i, vdev->vq[i].vring.num,
3184 vring_avail_idx(&vdev->vq[i]),
3185 vdev->vq[i].last_avail_idx, nheads);
3186 vdev->vq[i].used_idx = 0;
3187 vdev->vq[i].shadow_avail_idx = 0;
3188 vdev->vq[i].inuse = 0;
3189 continue;
3191 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3192 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3195 * Some devices migrate VirtQueueElements that have been popped
3196 * from the avail ring but not yet returned to the used ring.
3197 * Since the maximum ring size is < UINT16_MAX, it is safe to compute
3198 * the difference modulo UINT16_MAX + 1, i.e. with uint16_t wraparound.
3200 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3201 vdev->vq[i].used_idx);
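/*
 * e.g. (illustrative values) last_avail_idx = 3 after the 16-bit counter
 * wrapped and used_idx = 0xfffe gives inuse = (uint16_t)(3 - 0xfffe) = 5
 * elements still outstanding.
 */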
3202 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3203 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3204 "used_idx 0x%x",
3205 i, vdev->vq[i].vring.num,
3206 vdev->vq[i].last_avail_idx,
3207 vdev->vq[i].used_idx);
3208 return -1;
3213 if (vdc->post_load) {
3214 ret = vdc->post_load(vdev);
3215 if (ret) {
3216 return ret;
3220 return 0;
3223 void virtio_cleanup(VirtIODevice *vdev)
3225 qemu_del_vm_change_state_handler(vdev->vmstate);
3228 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3230 VirtIODevice *vdev = opaque;
3231 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3232 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3233 bool backend_run = running && virtio_device_started(vdev, vdev->status);
3234 vdev->vm_running = running;
3236 if (backend_run) {
3237 virtio_set_status(vdev, vdev->status);
3240 if (k->vmstate_change) {
3241 k->vmstate_change(qbus->parent, backend_run);
3244 if (!backend_run) {
3245 virtio_set_status(vdev, vdev->status);
3249 void virtio_instance_init_common(Object *proxy_obj, void *data,
3250 size_t vdev_size, const char *vdev_name)
3252 DeviceState *vdev = data;
3254 object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3255 vdev_size, vdev_name, &error_abort,
3256 NULL);
3257 qdev_alias_all_properties(vdev, proxy_obj);
3260 void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
3262 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3263 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3264 int i;
3265 int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3267 if (nvectors) {
3268 vdev->vector_queues =
3269 g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3272 vdev->start_on_kick = false;
3273 vdev->started = false;
3274 vdev->vhost_started = false;
3275 vdev->device_id = device_id;
3276 vdev->status = 0;
3277 qatomic_set(&vdev->isr, 0);
3278 vdev->queue_sel = 0;
3279 vdev->config_vector = VIRTIO_NO_VECTOR;
3280 vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
3281 vdev->vm_running = runstate_is_running();
3282 vdev->broken = false;
3283 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3284 vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3285 vdev->vq[i].vdev = vdev;
3286 vdev->vq[i].queue_index = i;
3287 vdev->vq[i].host_notifier_enabled = false;
3290 vdev->name = virtio_id_to_name(device_id);
3291 vdev->config_len = config_size;
3292 if (vdev->config_len) {
3293 vdev->config = g_malloc0(config_size);
3294 } else {
3295 vdev->config = NULL;
3297 vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3298 virtio_vmstate_change, vdev);
3299 vdev->device_endian = virtio_default_endian();
3300 vdev->use_guest_notifier_mask = true;
3304 * Only devices that have already been around prior to defining the virtio
3305 * standard support legacy mode; this includes devices not specified in the
3306 * standard. All newer devices conform to the virtio standard only.
3308 bool virtio_legacy_allowed(VirtIODevice *vdev)
3310 switch (vdev->device_id) {
3311 case VIRTIO_ID_NET:
3312 case VIRTIO_ID_BLOCK:
3313 case VIRTIO_ID_CONSOLE:
3314 case VIRTIO_ID_RNG:
3315 case VIRTIO_ID_BALLOON:
3316 case VIRTIO_ID_RPMSG:
3317 case VIRTIO_ID_SCSI:
3318 case VIRTIO_ID_9P:
3319 case VIRTIO_ID_RPROC_SERIAL:
3320 case VIRTIO_ID_CAIF:
3321 return true;
3322 default:
3323 return false;
3327 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3329 return vdev->disable_legacy_check;
3332 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3334 return vdev->vq[n].vring.desc;
3337 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3339 return virtio_queue_get_desc_addr(vdev, n) != 0;
3342 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3344 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3345 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3347 if (k->queue_enabled) {
3348 return k->queue_enabled(qbus->parent, n);
3350 return virtio_queue_enabled_legacy(vdev, n);
3353 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3355 return vdev->vq[n].vring.avail;
3358 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3360 return vdev->vq[n].vring.used;
3363 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3365 return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3368 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3370 int s;
3372 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3373 return sizeof(struct VRingPackedDescEvent);
3376 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3377 return offsetof(VRingAvail, ring) +
3378 sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3381 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3383 int s;
3385 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3386 return sizeof(struct VRingPackedDescEvent);
3389 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3390 return offsetof(VRingUsed, ring) +
3391 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
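/*
 * Worked example (illustrative): a split queue with 256 entries and
 * VIRTIO_RING_F_EVENT_IDX negotiated has an avail ring of
 * 4 + 2 * 256 + 2 = 518 bytes (flags, idx, ring[], avail_event) and a
 * used ring of 4 + 8 * 256 + 2 = 2054 bytes (flags, idx, ring[],
 * used_event).  Packed queues only expose the two 4-byte event
 * suppression structures here.
 */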
3394 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3395 int n)
3397 unsigned int avail, used;
3399 avail = vdev->vq[n].last_avail_idx;
3400 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3402 used = vdev->vq[n].used_idx;
3403 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3405 return avail | used << 16;
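/*
 * The packed-ring "last avail" state is reported as a single 32-bit
 * value: bits 0-14 last_avail_idx, bit 15 its wrap counter, bits 16-30
 * used_idx and bit 31 the used wrap counter.
 * virtio_queue_packed_set_last_avail_idx() below decodes the same layout.
 */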
3408 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3409 int n)
3411 return vdev->vq[n].last_avail_idx;
3414 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3416 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3417 return virtio_queue_packed_get_last_avail_idx(vdev, n);
3418 } else {
3419 return virtio_queue_split_get_last_avail_idx(vdev, n);
3423 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3424 int n, unsigned int idx)
3426 struct VirtQueue *vq = &vdev->vq[n];
3428 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3429 vq->last_avail_wrap_counter =
3430 vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3431 idx >>= 16;
3432 vq->used_idx = idx & 0x7fff;   /* bit 15 carries the wrap counter, not part of the index */
3433 vq->used_wrap_counter = !!(idx & 0x8000);
3436 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3437 int n, unsigned int idx)
3439 vdev->vq[n].last_avail_idx = idx;
3440 vdev->vq[n].shadow_avail_idx = idx;
3443 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3444 unsigned int idx)
3446 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3447 virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3448 } else {
3449 virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3453 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3454 int n)
3456 /* We don't have a reference like avail idx in shared memory */
3457 return;
3460 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3461 int n)
3463 RCU_READ_LOCK_GUARD();
3464 if (vdev->vq[n].vring.desc) {
3465 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3466 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3470 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3472 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3473 virtio_queue_packed_restore_last_avail_idx(vdev, n);
3474 } else {
3475 virtio_queue_split_restore_last_avail_idx(vdev, n);
3479 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3481 /* used idx was updated through set_last_avail_idx() */
3482 return;
3485 static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
3487 RCU_READ_LOCK_GUARD();
3488 if (vdev->vq[n].vring.desc) {
3489 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3493 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3495 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3496 return virtio_queue_packed_update_used_idx(vdev, n);
3497 } else {
3498 return virtio_split_packed_update_used_idx(vdev, n);
3502 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3504 vdev->vq[n].signalled_used_valid = false;
3507 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3509 return vdev->vq + n;
3512 uint16_t virtio_get_queue_index(VirtQueue *vq)
3514 return vq->queue_index;
3517 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3519 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3520 if (event_notifier_test_and_clear(n)) {
3521 virtio_irq(vq);
3525 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3526 bool with_irqfd)
3528 if (assign && !with_irqfd) {
3529 event_notifier_set_handler(&vq->guest_notifier,
3530 virtio_queue_guest_notifier_read);
3531 } else {
3532 event_notifier_set_handler(&vq->guest_notifier, NULL);
3534 if (!assign) {
3535 /* Test and clear notifier before closing it,
3536 * in case poll callback didn't have time to run. */
3537 virtio_queue_guest_notifier_read(&vq->guest_notifier);
3541 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3543 return &vq->guest_notifier;
3546 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3548 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3550 virtio_queue_set_notification(vq, 0);
3553 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3555 EventNotifier *n = opaque;
3556 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3558 return vq->vring.desc && !virtio_queue_empty(vq);
3561 static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
3563 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3565 virtio_queue_notify_vq(vq);
3568 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3570 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3572 /* Caller polls once more after this to catch requests that race with us */
3573 virtio_queue_set_notification(vq, 1);
3576 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
3578 aio_set_event_notifier(ctx, &vq->host_notifier, true,
3579 virtio_queue_host_notifier_read,
3580 virtio_queue_host_notifier_aio_poll,
3581 virtio_queue_host_notifier_aio_poll_ready);
3582 aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3583 virtio_queue_host_notifier_aio_poll_begin,
3584 virtio_queue_host_notifier_aio_poll_end);
3588 * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
3589 * this for rx virtqueues and similar cases where the virtqueue handler
3590 * function does not pop all elements. When the virtqueue is left non-empty
3591 * polling consumes CPU cycles and should not be used.
3593 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
3595 aio_set_event_notifier(ctx, &vq->host_notifier, true,
3596 virtio_queue_host_notifier_read,
3597 NULL, NULL);
3600 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
3602 aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
3603 /* Test and clear the notifier after disabling the event,
3604 * in case the poll callback didn't have time to run. */
3605 virtio_queue_host_notifier_read(&vq->host_notifier);
3608 void virtio_queue_host_notifier_read(EventNotifier *n)
3610 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3611 if (event_notifier_test_and_clear(n)) {
3612 virtio_queue_notify_vq(vq);
3616 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3618 return &vq->host_notifier;
3621 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3623 vq->host_notifier_enabled = enabled;
3626 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3627 MemoryRegion *mr, bool assign)
3629 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3630 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3632 if (k->set_host_notifier_mr) {
3633 return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3636 return -1;
3639 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3641 g_free(vdev->bus_name);
3642 vdev->bus_name = g_strdup(bus_name);
3645 void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3647 va_list ap;
3649 va_start(ap, fmt);
3650 error_vreport(fmt, ap);
3651 va_end(ap);
3653 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3654 vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3655 virtio_notify_config(vdev);
3658 vdev->broken = true;
3661 static void virtio_memory_listener_commit(MemoryListener *listener)
3663 VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3664 int i;
3666 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3667 if (vdev->vq[i].vring.num == 0) {
3668 break;
3670 virtio_init_region_cache(vdev, i);
3674 static void virtio_device_realize(DeviceState *dev, Error **errp)
3676 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3677 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3678 Error *err = NULL;
3680 /* Devices should either use vmsd or the load/save methods */
3681 assert(!vdc->vmsd || !vdc->load);
3683 if (vdc->realize != NULL) {
3684 vdc->realize(dev, &err);
3685 if (err != NULL) {
3686 error_propagate(errp, err);
3687 return;
3691 virtio_bus_device_plugged(vdev, &err);
3692 if (err != NULL) {
3693 error_propagate(errp, err);
3694 vdc->unrealize(dev);
3695 return;
3698 vdev->listener.commit = virtio_memory_listener_commit;
3699 vdev->listener.name = "virtio";
3700 memory_listener_register(&vdev->listener, vdev->dma_as);
3703 static void virtio_device_unrealize(DeviceState *dev)
3705 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3706 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3708 memory_listener_unregister(&vdev->listener);
3709 virtio_bus_device_unplugged(vdev);
3711 if (vdc->unrealize != NULL) {
3712 vdc->unrealize(dev);
3715 g_free(vdev->bus_name);
3716 vdev->bus_name = NULL;
3719 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3721 int i;
3722 if (!vdev->vq) {
3723 return;
3726 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3727 if (vdev->vq[i].vring.num == 0) {
3728 break;
3730 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
3732 g_free(vdev->vq);
3735 static void virtio_device_instance_finalize(Object *obj)
3737 VirtIODevice *vdev = VIRTIO_DEVICE(obj);
3739 virtio_device_free_virtqueues(vdev);
3741 g_free(vdev->config);
3742 g_free(vdev->vector_queues);
3745 static Property virtio_properties[] = {
3746 DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
3747 DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
3748 DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
3749 DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
3750 disable_legacy_check, false),
3751 DEFINE_PROP_END_OF_LIST(),
3754 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
3756 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3757 int i, n, r, err;
3760 * Batch all the host notifiers in a single transaction to avoid
3761 * quadratic time complexity in address_space_update_ioeventfds().
3763 memory_region_transaction_begin();
3764 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3765 VirtQueue *vq = &vdev->vq[n];
3766 if (!virtio_queue_get_num(vdev, n)) {
3767 continue;
3769 r = virtio_bus_set_host_notifier(qbus, n, true);
3770 if (r < 0) {
3771 err = r;
3772 goto assign_error;
3774 event_notifier_set_handler(&vq->host_notifier,
3775 virtio_queue_host_notifier_read);
3778 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3779 /* Kick right away to begin processing requests already in vring */
3780 VirtQueue *vq = &vdev->vq[n];
3781 if (!vq->vring.num) {
3782 continue;
3784 event_notifier_set(&vq->host_notifier);
3786 memory_region_transaction_commit();
3787 return 0;
3789 assign_error:
3790 i = n; /* save n for a second iteration after transaction is committed. */
3791 while (--n >= 0) {
3792 VirtQueue *vq = &vdev->vq[n];
3793 if (!virtio_queue_get_num(vdev, n)) {
3794 continue;
3797 event_notifier_set_handler(&vq->host_notifier, NULL);
3798 r = virtio_bus_set_host_notifier(qbus, n, false);
3799 assert(r >= 0);
3802 * The transaction expects the ioeventfds to be open when it
3803 * commits. Do it now, before the cleanup loop.
3805 memory_region_transaction_commit();
3807 while (--i >= 0) {
3808 if (!virtio_queue_get_num(vdev, i)) {
3809 continue;
3811 virtio_bus_cleanup_host_notifier(qbus, i);
3813 return err;
3816 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
3818 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3819 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3821 return virtio_bus_start_ioeventfd(vbus);
3824 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
3826 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3827 int n, r;
3830 * Batch all the host notifiers in a single transaction to avoid
3831 * quadratic time complexity in address_space_update_ioeventfds().
3833 memory_region_transaction_begin();
3834 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3835 VirtQueue *vq = &vdev->vq[n];
3837 if (!virtio_queue_get_num(vdev, n)) {
3838 continue;
3840 event_notifier_set_handler(&vq->host_notifier, NULL);
3841 r = virtio_bus_set_host_notifier(qbus, n, false);
3842 assert(r >= 0);
3845 * The transaction expects the ioeventfds to be open when it
3846 * commits. Do it now, before the cleanup loop.
3848 memory_region_transaction_commit();
3850 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3851 if (!virtio_queue_get_num(vdev, n)) {
3852 continue;
3854 virtio_bus_cleanup_host_notifier(qbus, n);
3858 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
3860 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3861 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3863 return virtio_bus_grab_ioeventfd(vbus);
3866 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
3868 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3869 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3871 virtio_bus_release_ioeventfd(vbus);
3874 static void virtio_device_class_init(ObjectClass *klass, void *data)
3876 /* Set the default value here. */
3877 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3878 DeviceClass *dc = DEVICE_CLASS(klass);
3880 dc->realize = virtio_device_realize;
3881 dc->unrealize = virtio_device_unrealize;
3882 dc->bus_type = TYPE_VIRTIO_BUS;
3883 device_class_set_props(dc, virtio_properties);
3884 vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
3885 vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
3887 vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
3890 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
3892 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3893 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3895 return virtio_bus_ioeventfd_enabled(vbus);
3898 static const TypeInfo virtio_device_info = {
3899 .name = TYPE_VIRTIO_DEVICE,
3900 .parent = TYPE_DEVICE,
3901 .instance_size = sizeof(VirtIODevice),
3902 .class_init = virtio_device_class_init,
3903 .instance_finalize = virtio_device_instance_finalize,
3904 .abstract = true,
3905 .class_size = sizeof(VirtioDeviceClass),
3908 static void virtio_register_types(void)
3910 type_register_static(&virtio_device_info);
3913 type_init(virtio_register_types)