/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "standard-headers/linux/virtio_ids.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;
struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};
const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio",
};
static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}

/* Called within call_rcu().  */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}
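
/*
 * Note: vq->vring.caches is an RCU-published pointer.  Readers fetch it
 * with qatomic_rcu_read() under rcu_read_lock() (see
 * vring_get_region_caches()), so tearing it down is a two-step dance:
 * swap the pointer out first, then let call_rcu() destroy the caches
 * once every in-flight reader has finished with its snapshot.
 */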
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;


    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                   true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
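
/*
 * Note: a worked example of the layout computed above, for a split ring
 * with num = 256 and the default VIRTIO_PCI_VRING_ALIGN of 4096:
 *
 *   desc  at vring->desc, 256 * sizeof(VRingDesc) = 4096 bytes
 *   avail at desc + 4096, offsetof(VRingAvail, ring[256]) = 516 bytes
 *   used  at vring_align(desc + 4096 + 516, 4096), i.e. the next
 *         4KiB boundary after the avail ring
 */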
/* Called within rcu_read_lock().  */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
    virtio_tswap16s(vdev, &e->flags);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
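
/*
 * Note: with VIRTIO_RING_F_EVENT_IDX, the guest's used_event field lives
 * in the spare uint16_t slot immediately after the avail ring entries,
 * which is why it is read as avail ring entry vq->vring.num (one past
 * the last real slot).  The symmetric avail_event slot after the used
 * ring is written by vring_set_avail_event() below.
 */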
/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;

    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
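
/*
 * Note: the split ring has two notification-suppression mechanisms.
 * Without EVENT_IDX the device toggles a coarse on/off flag
 * (VRING_USED_F_NO_NOTIFY) in the used ring.  With EVENT_IDX it instead
 * publishes the avail index it wants to be kicked at, so writing the
 * current avail idx re-arms notifications.  The smp_mb() on enable pairs
 * with the caller re-checking the avail idx afterwards, closing the
 * window where the guest kicked while notifications were off.
 */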
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}
static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    *flags = virtio_lduw_phys_cached(vdev, cache, off);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}
static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}
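
/*
 * Note: packed-ring ownership in a nutshell.  The driver toggles the
 * sense of the AVAIL/USED bits on every wrap, tracked by wrap_counter:
 *
 *   wrap_counter = 1:  flags avail=1/used=0  ->  available
 *   wrap_counter = 0:  flags avail=0/used=1  ->  available
 *   avail == used:                               not available
 *
 * which collapses to (avail != used) && (avail == wrap_counter).
 */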
/* Fetch avail_idx from VQ memory only when we really need to know if the
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock().  */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}
static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}
/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
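
/*
 * Note on the check above: indices are free-running uint16_t values, so
 * (uint16_t)(new - old) is the count just published.  If
 * (int16_t)(new - vq->signalled_used) is smaller, the window (old, new]
 * has swept past the index we last signalled on, and the cached
 * signalled_used can no longer be trusted for interrupt suppression.
 */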
static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
        vq->signalled_used_valid = false;
    }
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
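
/*
 * Note: both indices are free-running uint16_t values, so the
 * subtraction above is modulo 2^16 and stays correct across wrap-around:
 * an avail idx of 3 with last_avail_idx 0xfffe yields
 * (uint16_t)(3 - 0xfffe) = 5 pending heads.  A result larger than
 * vring.num is only possible if the guest corrupted the ring.
 */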
/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
/* Called within rcu_read_lock().  */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            max = vq->vring.num;
        }
        total_bufs = num_bufs;
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache *desc_cache;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;

        desc_cache = &caches->desc;
        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            max = vq->vring.num;
        }

        idx += num_bufs - total_bufs;
        total_bufs = num_bufs;

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;
done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE,
                                              MEMTXATTRS_UNSPECIFIED);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE,
                                        MEMTXATTRS_UNSPECIFIED);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
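
/*
 * Note: the element is one contiguous allocation with the variable-length
 * arrays laid out behind the struct:
 *
 *   [ VirtQueueElement | in_addr[in_num] | out_addr[out_num]
 *                      | in_sg[in_num]   | out_sg[out_num]   ]
 *
 * Each offset is rounded up with QEMU_ALIGN_UP to the member's natural
 * alignment, so a single g_malloc()/g_free() pair covers everything.
 */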
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are neither input nor output descriptors. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

    /* When we start there are neither input nor output descriptors. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    i = vq->last_avail_idx;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
    id = desc.id;
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingPackedDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingPackedDesc);
        i = 0;
        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
                                             desc_cache ==
                                             &indirect_desc_cache);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    elem->index = id;
    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
    vq->last_avail_idx += elem->ndescs;
    vq->inuse += elem->ndescs;

    if (vq->last_avail_idx >= vq->vring.num) {
        vq->last_avail_idx -= vq->vring.num;
        vq->last_avail_wrap_counter ^= 1;
    }

    vq->shadow_avail_idx = vq->last_avail_idx;
    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    if (virtio_device_disabled(vq->vdev)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_pop(vq, sz);
    } else {
        return virtqueue_split_pop(vq, sz);
    }
}

static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache *desc_cache;
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    VRingPackedDesc desc;

    RCU_READ_LOCK_GUARD();

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return 0;
    }

    desc_cache = &caches->desc;

    virtio_queue_set_notification(vq, 0);

    while (vq->inuse < vq->vring.num) {
        unsigned int idx = vq->last_avail_idx;
        /*
         * works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory.
         */
        vring_packed_desc_read(vdev, &desc, desc_cache,
                               vq->last_avail_idx, true);
        if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
            break;
        }
        elem.index = desc.id;
        elem.ndescs = 1;
        while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
                                               vq->vring.num, &idx, false)) {
            ++elem.ndescs;
        }
        /*
         * immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0.
         */
        virtqueue_push(vq, &elem, 0);
        dropped++;
        vq->last_avail_idx += elem.ndescs;
        if (vq->last_avail_idx >= vq->vring.num) {
            vq->last_avail_idx -= vq->vring.num;
            vq->last_avail_wrap_counter ^= 1;
        }
    }

    return dropped;
}

static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}
/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers cannot be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    struct VirtIODevice *vdev = vq->vdev;

    if (virtio_device_disabled(vq->vdev)) {
        return 0;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_drop_all(vq);
    } else {
        return virtqueue_split_drop_all(vq);
    }
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
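
/*
 * Note: because whole VirtQueueElementOld structs are read and written
 * with qemu_get_buffer()/qemu_put_buffer(), every element occupies a
 * fixed sizeof(VirtQueueElementOld) on the wire regardless of
 * in_num/out_num, and the iovec base pointers are never transferred:
 * they are zeroed on save (only iov_len is copied) and rebuilt by
 * virtqueue_map() on load.
 */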
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_get_be32s(f, &elem->ndescs);
    }

    virtqueue_map(vdev, elem);
    return elem;
}

void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
                                VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_put_be32s(f, &elem->ndescs);
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (virtio_device_disabled(vdev)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }

    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;

    return 0;
}

static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    if (cpu_virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->disabled = false;
    qatomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        vdev->vq[i].last_avail_wrap_counter = true;
        vdev->vq[i].shadow_avail_wrap_counter = true;
        vdev->vq[i].used_wrap_counter = true;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}
static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->host_notifier_enabled) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);

    return &vdev->vq[i];
}
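
/*
 * Editor's sketch (not part of the original file): the shape of a
 * handle_output callback a device backend might register with
 * virtio_add_queue().  The handler name and the empty processing step
 * are hypothetical; virtqueue_pop()/virtqueue_push() are the APIs
 * device models use for the pop/push/notify cycle.
 */
static void example_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem;

    /* Drain every buffer the guest has made available on this queue. */
    while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
        /* ... consume elem->out_sg / fill elem->in_sg here ... */
        virtqueue_push(vq, elem, 0);    /* return it to the used ring */
        g_free(elem);
    }
    virtio_notify(vdev, vq);            /* at most one guest interrupt */
}
/* Registered as: virtio_add_queue(vdev, 256, example_handle_output); */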
void virtio_delete_queue(VirtQueue *vq)
{
    vq->vring.num = 0;
    vq->vring.num_default = 0;
    vq->handle_output = NULL;
    g_free(vq->used_elems);
    vq->used_elems = NULL;
    virtio_virtqueue_reset_region_cache(vq);
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    virtio_delete_queue(&vdev->vq[n]);
}
static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = qatomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        qatomic_or(&vdev->isr, value);
    }
}
/* Called within rcu_read_lock(). */
static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
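
/*
 * Editor's note (sketch, not part of the original file): vring_need_event()
 * comes from the standard virtio headers.  Its modulo-2^16 arithmetic is
 * worth spelling out, since it is what makes the check above robust to
 * index wrap-around:
 *
 *     need_event = (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old);
 *
 * i.e. notify iff the guest's event index lies in the half-open window
 * (old, new].  Example: old = 0xfffe, new = 0x0001, event_idx = 0xffff
 * still notifies, because both subtractions wrap consistently.
 */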
static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
                                    uint16_t off_wrap, uint16_t new,
                                    uint16_t old)
{
    int off = off_wrap & ~(1 << 15);

    if (wrap != off_wrap >> 15) {
        off -= vq->vring.num;
    }

    return vring_need_event(off, new, old);
}

/* Called within rcu_read_lock(). */
static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    VRingPackedDescEvent e;
    uint16_t old, new;
    bool v;
    VRingMemoryRegionCaches *caches;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return false;
    }

    vring_packed_event_read(vdev, &caches->avail, &e);

    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;

    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
        return false;
    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
        return true;
    }

    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
                                         e.off_wrap, new, old);
}

/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_packed_should_notify(vdev, vq);
    } else {
        return virtio_split_should_notify(vdev, vq);
    }
}
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     * Windows.
     * Next driver release from 2016 fixed this problem, so working around it
     * is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_packed_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
           k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static bool virtio_started_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->started;
}

static bool virtio_disabled_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->disabled;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_packed_virtqueue = {
    .name = "packed_virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
        VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
        VMSTATE_UINT16(used_idx, struct VirtQueue),
        VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
        VMSTATE_UINT32(inuse, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_packed_virtqueues = {
    .name = "virtio/packed_virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_packed_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_started = {
    .name = "virtio/started",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_started_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(started, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_disabled = {
    .name = "virtio/disabled",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_disabled_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(disabled, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        &vmstate_virtio_started,
        &vmstate_virtio_packed_virtqueues,
        &vmstate_virtio_disabled,
        NULL
    }
};
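
/*
 * Editor's sketch (not part of the original file): the pattern the
 * subsection list above relies on.  A subsection is emitted only when its
 * .needed callback returns true on the source, and is simply absent from
 * the stream otherwise, which is what keeps migration compatible with
 * older QEMUs.  A hypothetical new flag would be wired up like this:
 *
 *     static bool virtio_example_flag_needed(void *opaque)
 *     {
 *         VirtIODevice *vdev = opaque;
 *         return vdev->broken;            // stand-in predicate
 *     }
 *
 *     static const VMStateDescription vmstate_virtio_example_flag = {
 *         .name = "virtio/example_flag",  // hypothetical name
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .needed = &virtio_example_flag_needed,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_BOOL(broken, VirtIODevice),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 *
 * plus a pointer slot in vmstate_virtio's .subsections before the NULL.
 */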
int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
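
/*
 * Editor's note (summary, not part of the original file): the legacy
 * on-the-wire layout produced above, which virtio_load() below parses
 * in the same order:
 *
 *   [transport config]      k->save_config(), if implemented
 *   u8  status, u8 isr, u16 queue_sel
 *   u32 guest_features (low 32 bits; the high bits travel in the
 *       "virtio/64bit_features" subsection)
 *   u32 config_len, followed by config_len bytes of config space
 *   u32 number of active virtqueues, then per queue:
 *       u32 vring.num, [u32 vring.align], u64 vring.desc,
 *       u16 last_avail_idx, [transport per-queue state]
 *   [device payload]        vdc->save() and/or vdc->vmsd
 *   [vmstate_virtio subsections]
 */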
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    return virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    int ret;
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }

    ret = virtio_set_features_nocheck(vdev, val);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
        int i;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            if (vdev->vq[i].vring.num != 0) {
                virtio_init_region_cache(vdev, i);
            }
        }
    }

    if (!ret) {
        if (!virtio_device_started(vdev, vdev->status) &&
            !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
            vdev->start_on_kick = true;
        }
    }
    return ret;
}
size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
                                      uint64_t host_features)
{
    size_t config_size = 0;
    int i;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    return config_size;
}
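
/*
 * Editor's sketch (not part of the original file): typical use of
 * virtio_feature_get_config_size().  A device declares a table mapping
 * each optional feature to the config-space size it requires; the table
 * below is hypothetical, but the shape matches how devices such as
 * virtio-net define theirs.
 *
 *     static const VirtIOFeature example_feature_sizes[] = {
 *         {.flags = 1ULL << EXAMPLE_F_FOO,   // hypothetical feature bit
 *          .end = 8},                        // bytes 0..7 needed
 *         {.flags = 1ULL << EXAMPLE_F_BAR,
 *          .end = 16},                       // grows config to 16 bytes
 *         {}
 *     };
 *
 *     config_size = virtio_feature_get_config_size(example_feature_sizes,
 *                                                  host_features);
 *
 * With only EXAMPLE_F_FOO offered this yields 8; with both, MAX() picks 16.
 */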
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    if (!virtio_device_started(vdev, vdev->status) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->start_on_kick = true;
    }

    RCU_READ_LOCK_GUARD();
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;

            /*
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up.  Legacy devices need
             * to calculate used and avail ring addresses based on the desc
             * address.
             */
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
            } else {
                virtio_queue_update_rings(vdev, i);
            }

            if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
                vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
                vdev->vq[i].shadow_avail_wrap_counter =
                                        vdev->vq[i].last_avail_wrap_counter;
                continue;
            }

            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                vdev->vq[i].used_idx = 0;
                vdev->vq[i].shadow_avail_idx = 0;
                vdev->vq[i].inuse = 0;
                continue;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    if (vdc->post_load) {
        ret = vdc->post_load(vdev);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}

static void virtio_vmstate_change(void *opaque, bool running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && virtio_device_started(vdev, vdev->status);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
                                       vdev_size, vdev_name, &error_abort,
                                       NULL);
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->vhost_started = false;
    vdev->device_id = device_id;
    vdev->status = 0;
    qatomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
        vdev->vq[i].host_notifier_enabled = false;
    }

    vdev->name = virtio_id_to_name(device_id);
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
                                                     virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
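
/*
 * Editor's sketch (not part of the original file): the usual realize-time
 * pairing of virtio_init() with virtio_add_queue().  Device id, config
 * size, queue size and handler are hypothetical (the handler is the
 * sketch defined earlier in this file); the sequence is the one device
 * models follow.
 */
static void example_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    /* Allocate common state: 16 bytes of config space in this sketch. */
    virtio_init(vdev, VIRTIO_ID_BLOCK, 16);

    /* One request queue, 128 entries, serviced by the sketch handler. */
    virtio_add_queue(vdev, 128, example_handle_output);
}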
/*
 * Only devices that have already been around prior to defining the virtio
 * standard support legacy mode; this includes devices not specified in the
 * standard. All newer devices conform to the virtio standard only.
 */
bool virtio_legacy_allowed(VirtIODevice *vdev)
{
    switch (vdev->device_id) {
    case VIRTIO_ID_NET:
    case VIRTIO_ID_BLOCK:
    case VIRTIO_ID_CONSOLE:
    case VIRTIO_ID_RNG:
    case VIRTIO_ID_BALLOON:
    case VIRTIO_ID_RPMSG:
    case VIRTIO_ID_SCSI:
    case VIRTIO_ID_9P:
    case VIRTIO_ID_RPROC_SERIAL:
    case VIRTIO_ID_CAIF:
        return true;
    default:
        return false;
    }
}

bool virtio_legacy_check_disabled(VirtIODevice *vdev)
{
    return vdev->disable_legacy_check;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
{
    return virtio_queue_get_desc_addr(vdev, n) != 0;
}

bool virtio_queue_enabled(VirtIODevice *vdev, int n)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->queue_enabled) {
        return k->queue_enabled(qbus->parent, n);
    }
    return virtio_queue_enabled_legacy(vdev, n);
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num + s;
}
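
/*
 * Editor's note (worked example, not part of the original file): for a
 * split ring with num = 256 and VIRTIO_RING_F_EVENT_IDX negotiated, the
 * avail area is offsetof(VRingAvail, ring) + 2 * 256 + 2 = 4 + 512 + 2
 * = 518 bytes: flags and idx (2 bytes each), 256 two-byte ring entries,
 * plus the trailing used_event field enabled by EVENT_IDX.
 */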
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
}

static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
                                                           int n)
{
    unsigned int avail, used;

    avail = vdev->vq[n].last_avail_idx;
    avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;

    used = vdev->vq[n].used_idx;
    used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;

    return avail | used << 16;
}
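
/*
 * Editor's note (worked example, not part of the original file): the
 * 32-bit value packs both ring positions: bits 0-14 last_avail_idx,
 * bit 15 its wrap counter, bits 16-30 used_idx, bit 31 its wrap counter.
 * E.g. last_avail_idx = 5 and used_idx = 3 with both wrap counters set
 * encodes as 0x80038005.  virtio_queue_packed_set_last_avail_idx() below
 * is the exact inverse.
 */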
static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    return vdev->vq[n].last_avail_idx;
}

unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_get_last_avail_idx(vdev, n);
    } else {
        return virtio_queue_split_get_last_avail_idx(vdev, n);
    }
}

static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
                                                   int n, unsigned int idx)
{
    struct VirtQueue *vq = &vdev->vq[n];

    vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
    vq->last_avail_wrap_counter =
        vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
    /* Shift the used half into the low bits before unpacking it. */
    idx >>= 16;
    vq->used_idx = idx & 0x7fff;
    vq->used_wrap_counter = !!(idx & 0x8000);
}
static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
                                                  int n, unsigned int idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
                                     unsigned int idx)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
    } else {
        virtio_queue_split_set_last_avail_idx(vdev, n, idx);
    }
}

static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
                                                       int n)
{
    /* We don't have a reference like avail idx in shared memory */
    return;
}

static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
}

void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_restore_last_avail_idx(vdev, n);
    } else {
        virtio_queue_split_restore_last_avail_idx(vdev, n);
    }
}
static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    /* used idx was updated through set_last_avail_idx() */
    return;
}

static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_update_used_idx(vdev, n);
    }

    return virtio_split_packed_update_used_idx(vdev, n);
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}
uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    return vq->vring.desc && !virtio_queue_empty(vq);
}

static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_notify_vq(vq);
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}

void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier, true,
                           virtio_queue_host_notifier_read,
                           virtio_queue_host_notifier_aio_poll,
                           virtio_queue_host_notifier_aio_poll_ready);
    aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                virtio_queue_host_notifier_aio_poll_begin,
                                virtio_queue_host_notifier_aio_poll_end);
}
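
/*
 * Editor's sketch (not part of the original file): how a dataplane
 * device typically moves a virtqueue into an IOThread's AioContext and
 * back.  "s->ctx" is a hypothetical per-device AioContext pointer; the
 * attach/detach calls are the real API shown above and below.
 *
 *     // start: handle kicks in the IOThread
 *     aio_context_acquire(s->ctx);
 *     virtio_queue_aio_attach_host_notifier(vq, s->ctx);
 *     aio_context_release(s->ctx);
 *
 *     // stop: detach, then fall back to the main loop
 *     aio_context_acquire(s->ctx);
 *     virtio_queue_aio_detach_host_notifier(vq, s->ctx);
 *     aio_context_release(s->ctx);
 */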
/*
 * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
 * this for rx virtqueues and similar cases where the virtqueue handler
 * function does not pop all elements. When the virtqueue is left non-empty
 * polling consumes CPU cycles and should not be used.
 */
void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier, true,
                           virtio_queue_host_notifier_read,
                           NULL, NULL);
}

void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);

    /* Test and clear notifier after disabling the event,
     * in case poll callback didn't have time to run. */
    virtio_queue_host_notifier_read(&vq->host_notifier);
}
void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
{
    vq->host_notifier_enabled = enabled;
}

int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
                                      MemoryRegion *mr, bool assign)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->set_host_notifier_mr) {
        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
    }

    return -1;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
        virtio_notify_config(vdev);
    }

    vdev->broken = true;
}

static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_init_region_cache(vdev, i);
    }
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        vdc->unrealize(dev);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    vdev->listener.name = "virtio";
    memory_listener_register(&vdev->listener, vdev->dma_as);
}

static void virtio_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);

    memory_listener_unregister(&vdev->listener);
    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev);
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;

    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}

static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
    DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
    DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
                     disable_legacy_check, false),
    DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}
int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    device_class_set_props(dc, virtio_properties);
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)