/*
 * Copyright IBM, Corp. 2007
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
18 #include "qemu/error-report.h"
20 #include "qemu/main-loop.h"
21 #include "qemu/module.h"
22 #include "hw/virtio/virtio.h"
23 #include "migration/qemu-file-types.h"
24 #include "qemu/atomic.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/qdev-properties.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "sysemu/dma.h"
29 #include "sysemu/runstate.h"
30 #include "standard-headers/linux/virtio_ids.h"
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;
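
/*
 * Note on layouts: VRingDesc/VRingAvail/VRingUsed mirror the split
 * virtqueue layout of VIRTIO 1.1 sec. 2.6, where the driver writes the
 * avail ring and the device writes the used ring.  VRingPackedDesc
 * mirrors the packed layout of sec. 2.7, where a single descriptor ring
 * is shared and ownership is signalled through the AVAIL/USED flag bits
 * rather than through separate rings.
 */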
typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;
typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;
struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};
/* Called within call_rcu().  */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}
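
/*
 * The region caches are replaced, not updated in place: readers access
 * them under rcu_read_lock(), so the old set must stay valid until all
 * current readers are done.  call_rcu() defers the destroy/free to
 * virtio_free_region_cache() once that grace period has passed.
 */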
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                   true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
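
/*
 * Worked example of the layout computed above, assuming a split ring
 * with num = 256 and the default 4096-byte alignment: the descriptor
 * table occupies 256 * 16 = 4096 bytes, so avail = desc + 4096; the
 * avail ring ends at avail + 4 + 2 * 256 = avail + 516, and rounding
 * up to the next 4096-byte boundary places used at desc + 8192.
 */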
/* Called within rcu_read_lock().  */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
    virtio_tswap16s(vdev, &e->flags);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;

    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
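
/*
 * Two suppression mechanisms exist for the split ring: without
 * VIRTIO_RING_F_EVENT_IDX the device toggles VRING_USED_F_NO_NOTIFY in
 * the used ring's flags; with it, the device instead publishes the
 * avail index it wants to be kicked at ("avail event") and the driver
 * only notifies when crossing that index.  Either way this is only a
 * hint: a notification may still arrive after suppression.
 */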
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    *flags = virtio_lduw_phys_cached(vdev, cache, off);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the remaining fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}
static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}
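
/*
 * In a packed ring each descriptor carries an AVAIL and a USED flag bit.
 * The driver makes a descriptor available by setting AVAIL equal to its
 * ring wrap counter and USED to the inverse; the device marks it used by
 * making the two bits equal again.  Hence a descriptor is available to
 * us exactly when the bits differ and AVAIL matches the wrap counter we
 * are currently polling with.
 */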
static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock().  */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
    }
}
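
/*
 * Element 0 is deliberately written last above, with strict_order set:
 * the driver detects completed buffers by polling the flags of the
 * first used descriptor, so its flags write must not become visible
 * before the other descriptors of the batch have been written back.
 */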
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
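
/*
 * A device that completes several requests at once would typically
 * batch instead of calling virtqueue_push() per element, e.g. (sketch;
 * elems/lens are hypothetical caller-side arrays):
 *
 *     for (i = 0; i < n; i++) {
 *         virtqueue_fill(vq, elems[i], lens[i], i);
 *     }
 *     virtqueue_flush(vq, n);
 *
 * so the used index (split) or descriptor flags (packed) are exposed
 * to the guest only once.  Note virtqueue_fill() must be called under
 * rcu_read_lock(); virtqueue_push() takes it internally.
 */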
/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
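
/*
 * Split-ring chains: VRING_DESC_F_NEXT links descriptors through the
 * "next" field, so a request can span several descriptors (and, with
 * VRING_DESC_F_INDIRECT, a separate indirect table).  The max/next
 * checks above are what bound a malicious or buggy guest that builds a
 * looped or out-of-range chain.
 */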
/* Called within rcu_read_lock().  */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            max = vq->vring.num;
        }
        total_bufs = num_bufs;
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}
/* Called within rcu_read_lock().  */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache *desc_cache;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;

        desc_cache = &caches->desc;
        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            max = vq->vring.num;
        }

        idx += num_bufs - total_bufs;
        total_bufs = num_bufs;

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * guest memory. */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
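
/*
 * A VirtQueueElement and its four variable-sized arrays live in one
 * allocation: sz bytes for the (possibly device-extended) element
 * itself, then in_addr[], out_addr[], in_sg[] and out_sg[], each
 * aligned with QEMU_ALIGN_UP.  This keeps the whole element freeable
 * with a single g_free() and lets devices embed it in a larger request
 * struct by passing sz > sizeof(VirtQueueElement).
 */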
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
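
/*
 * Callers own the element returned by the pop functions: they must
 * eventually hand it back with virtqueue_push() (or virtqueue_fill()
 * plus virtqueue_flush()), or abandon it via virtqueue_detach_element(),
 * and then g_free() it.
 */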
static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

    /* When we start there are none of either input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    i = vq->last_avail_idx;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
    id = desc.id;
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingPackedDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingPackedDesc);
        i = 0;
        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
                                             desc_cache ==
                                             &indirect_desc_cache);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    elem->index = id;
    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
    vq->last_avail_idx += elem->ndescs;
    vq->inuse += elem->ndescs;

    if (vq->last_avail_idx >= vq->vring.num) {
        vq->last_avail_idx -= vq->vring.num;
        vq->last_avail_wrap_counter ^= 1;
    }

    vq->shadow_avail_idx = vq->last_avail_idx;
    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    if (virtio_device_disabled(vq->vdev)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_pop(vq, sz);
    } else {
        return virtqueue_split_pop(vq, sz);
    }
}
static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache *desc_cache;
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    VRingPackedDesc desc;

    RCU_READ_LOCK_GUARD();

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return 0;
    }

    desc_cache = &caches->desc;

    virtio_queue_set_notification(vq, 0);

    while (vq->inuse < vq->vring.num) {
        unsigned int idx = vq->last_avail_idx;
        /*
         * works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory.
         */
        vring_packed_desc_read(vdev, &desc, desc_cache,
                               vq->last_avail_idx, true);
        if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
            break;
        }
        elem.index = desc.id;
        elem.ndescs = 1;
        while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
                                               vq->vring.num, &idx, false)) {
            ++elem.ndescs;
        }
        /*
         * immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0.
         */
        virtqueue_push(vq, &elem, 0);
        dropped++;
        vq->last_avail_idx += elem.ndescs;
        if (vq->last_avail_idx >= vq->vring.num) {
            vq->last_avail_idx -= vq->vring.num;
            vq->last_avail_wrap_counter ^= 1;
        }
    }

    return dropped;
}
static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}

/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers can not be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    struct VirtIODevice *vdev = vq->vdev;

    if (virtio_device_disabled(vq->vdev)) {
        return 0;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_drop_all(vq);
    } else {
        return virtqueue_split_drop_all(vq);
    }
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_get_be32s(f, &elem->ndescs);
    }

    virtqueue_map(vdev, elem);
    return elem;
}
void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
                                VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_put_be32s(f, &elem->ndescs);
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (virtio_device_disabled(vdev)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }

    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;

    return 0;
}

static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    if (cpu_virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->disabled = false;
    qatomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        vdev->vq[i].last_avail_wrap_counter = true;
        vdev->vq[i].shadow_avail_wrap_counter = true;
        vdev->vq[i].used_wrap_counter = true;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}
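
/*
 * The config space accessors below come in two flavours: the legacy
 * virtio_config_read/write helpers use the guest's natural endianness
 * (ldub_p, lduw_p, ldl_p and friends), while the _modern_ variants are
 * fixed little-endian (lduw_le_p, ldl_le_p etc.) as required for
 * VIRTIO_F_VERSION_1 devices.
 */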
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    bool ret = false;

    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        ret = vq->handle_aio_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }

    return ret;
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->host_notifier_enabled) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
                                       queue_size);

    return &vdev->vq[i];
}
void virtio_delete_queue(VirtQueue *vq)
{
    vq->vring.num = 0;
    vq->vring.num_default = 0;
    vq->handle_output = NULL;
    vq->handle_aio_output = NULL;
    g_free(vq->used_elems);
    vq->used_elems = NULL;
    virtio_virtqueue_reset_region_cache(vq);
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    virtio_delete_queue(&vdev->vq[n]);
}
static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = qatomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        qatomic_or(&vdev->isr, value);
    }
}
/* Called within rcu_read_lock(). */
static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
                                    uint16_t off_wrap, uint16_t new,
                                    uint16_t old)
{
    int off = off_wrap & ~(1 << 15);

    if (wrap != off_wrap >> 15) {
        off -= vq->vring.num;
    }

    return vring_need_event(off, new, old);
}
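
/*
 * In the packed layout the driver publishes its event offset and wrap
 * counter in a single 16-bit word: bit 15 is the wrap counter, bits 0-14
 * the descriptor offset. When the driver's wrap counter differs from the
 * device's, the event offset refers to the previous pass over the ring,
 * so it is shifted down by vring.num to compare in the same epoch.
 */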
/* Called within rcu_read_lock(). */
static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    VRingPackedDescEvent e;
    uint16_t old, new;
    bool v;
    VRingMemoryRegionCaches *caches;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return false;
    }

    vring_packed_event_read(vdev, &caches->avail, &e);

    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;

    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
        return false;
    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
        return true;
    }

    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
                                         e.off_wrap, new, old);
}
/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_packed_should_notify(vdev, vq);
    } else {
        return virtio_split_should_notify(vdev, vq);
    }
}
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster. Hence,
     * this manifested as a more serious hang during shutdown with
     * MSI enabled.
     * Next driver release from 2016 fixed this problem, so working around it
     * is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}
static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_packed_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
}
static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static bool virtio_started_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->started;
}

static bool virtio_disabled_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->disabled;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_packed_virtqueue = {
    .name = "packed_virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
        VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
        VMSTATE_UINT16(used_idx, struct VirtQueue),
        VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
        VMSTATE_UINT32(inuse, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_packed_virtqueues = {
    .name = "virtio/packed_virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_packed_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
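
/*
 * Each of the subsections above is guarded by a .needed callback: the
 * subsection is only written to the migration stream when the callback
 * returns true on the source, and an older destination that does not
 * know the subsection can still load the stream as long as it is absent.
 */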
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};
static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_started = {
    .name = "virtio/started",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_started_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(started, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_disabled = {
    .name = "virtio/disabled",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_disabled_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(disabled, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        &vmstate_virtio_started,
        &vmstate_virtio_packed_virtqueues,
        &vmstate_virtio_disabled,
        NULL
    }
};
int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
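
/*
 * Legacy stream layout written above, in order: transport config (if any),
 * status, isr, queue_sel, the low 32 feature bits, config space, the number
 * of in-use virtqueues, then per queue: size, optional alignment, desc
 * address and last_avail_idx. Ring addresses beyond desc and the high
 * feature bits travel in the vmstate_virtio subsections instead.
 */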
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    return virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    int ret;
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    ret = virtio_set_features_nocheck(vdev, val);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
        int i;
        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            if (vdev->vq[i].vring.num != 0) {
                virtio_init_region_cache(vdev, i);
            }
        }
    }
    if (!ret) {
        if (!virtio_device_started(vdev, vdev->status) &&
            !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
            vdev->start_on_kick = true;
        }
    }
    return ret;
}
size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
                                      uint64_t host_features)
{
    size_t config_size = 0;
    int i;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    return config_size;
}
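
/*
 * For example (hypothetical values): with a feature table of
 * {{ .flags = 1ULL << F_A, .end = 24 }, { .flags = 1ULL << F_B, .end = 40 }}
 * and both features set in host_features, the result is 40, the largest
 * config space offset any enabled feature needs.
 */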
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }
    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features. Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    if (!virtio_device_started(vdev, vdev->status) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->start_on_kick = true;
    }
    RCU_READ_LOCK_GUARD();
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;

            /*
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up. Legacy devices need
             * to calculate used and avail ring addresses based on the desc
             * address.
             */
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
            } else {
                virtio_queue_update_rings(vdev, i);
            }

            if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
                vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
                vdev->vq[i].shadow_avail_wrap_counter =
                                        vdev->vq[i].last_avail_wrap_counter;
                continue;
            }

            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                vdev->vq[i].used_idx = 0;
                vdev->vq[i].shadow_avail_idx = 0;
                vdev->vq[i].inuse = 0;
                continue;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
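            /*
             * Example: last_avail_idx == 3 after wrapping while
             * used_idx == 0xfffe gives (uint16_t)(3 - 0xfffe) == 5
             * elements still in flight.
             */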
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    if (vdc->post_load) {
        ret = vdc->post_load(vdev);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}
static void virtio_vmstate_change(void *opaque, bool running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && virtio_device_started(vdev, vdev->status);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
                                       vdev_size, vdev_name, &error_abort,
                                       NULL);
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->device_id = device_id;
    vdev->status = 0;
    qatomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
        vdev->vq[i].host_notifier_enabled = false;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
            virtio_vmstate_change, vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
/*
 * Only devices that have already been around prior to defining the virtio
 * standard support legacy mode; this includes devices not specified in the
 * standard. All newer devices conform to the virtio standard only.
 */
bool virtio_legacy_allowed(VirtIODevice *vdev)
{
    switch (vdev->device_id) {
    case VIRTIO_ID_NET:
    case VIRTIO_ID_BLOCK:
    case VIRTIO_ID_CONSOLE:
    case VIRTIO_ID_RNG:
    case VIRTIO_ID_BALLOON:
    case VIRTIO_ID_RPMSG:
    case VIRTIO_ID_SCSI:
    case VIRTIO_ID_9P:
    case VIRTIO_ID_RPROC_SERIAL:
    case VIRTIO_ID_CAIF:
        return true;
    default:
        return false;
    }
}

bool virtio_legacy_check_disabled(VirtIODevice *vdev)
{
    return vdev->disable_legacy_check;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
{
    return virtio_queue_get_desc_addr(vdev, n) != 0;
}

bool virtio_queue_enabled(VirtIODevice *vdev, int n)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->queue_enabled) {
        return k->queue_enabled(qbus->parent, n);
    }
    return virtio_queue_enabled_legacy(vdev, n);
}
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num + s;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
}
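
/*
 * For a split ring of 256 entries with VIRTIO_RING_F_EVENT_IDX negotiated,
 * the sizes above work out to 4 + 2 * 256 + 2 == 518 bytes for the avail
 * ring and 4 + 8 * 256 + 2 == 2054 bytes for the used ring (each ring has
 * a 4-byte header and an optional trailing 2-byte event index).
 */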
static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
                                                           int n)
{
    unsigned int avail, used;

    avail = vdev->vq[n].last_avail_idx;
    avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;

    used = vdev->vq[n].used_idx;
    used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;

    return avail | used << 16;
}
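
/*
 * The packed "last avail" state is thus folded into one 32-bit value:
 * bits 0-14 hold last_avail_idx with its wrap counter in bit 15, and
 * bits 16-30 hold used_idx with its wrap counter in bit 31. The setter
 * below decodes the same layout.
 */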
static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    return vdev->vq[n].last_avail_idx;
}

unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_get_last_avail_idx(vdev, n);
    } else {
        return virtio_queue_split_get_last_avail_idx(vdev, n);
    }
}
static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
                                                   int n, unsigned int idx)
{
    struct VirtQueue *vq = &vdev->vq[n];

    vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
    vq->last_avail_wrap_counter =
        vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
    idx >>= 16;
    /* Mask only the index bits so the wrap bit cannot leak into used_idx. */
    vq->used_idx = idx & 0x7fff;
    vq->used_wrap_counter = !!(idx & 0x8000);
}

static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
                                                  int n, unsigned int idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
                                     unsigned int idx)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
    } else {
        virtio_queue_split_set_last_avail_idx(vdev, n, idx);
    }
}
static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
                                                       int n)
{
    /* We don't have a reference like avail idx in shared memory */
    return;
}

static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
}

void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_restore_last_avail_idx(vdev, n);
    } else {
        virtio_queue_split_restore_last_avail_idx(vdev, n);
    }
}
static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    /* used idx was updated through set_last_avail_idx() */
    return;
}

static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_update_used_idx(vdev, n);
    }

    return virtio_split_packed_update_used_idx(vdev, n);
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    if (!vq->vring.desc || virtio_queue_empty(vq)) {
        return false;
    }

    return virtio_queue_notify_aio_vq(vq);
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}
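
/*
 * While the AioContext is busy-polling the ring, guest kicks are
 * redundant, so poll_begin suppresses them by clearing the queue's
 * notification flag and poll_end re-enables them once polling stops.
 */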
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleAIOOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
{
    vq->host_notifier_enabled = enabled;
}
int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
                                      MemoryRegion *mr, bool assign)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->set_host_notifier_mr) {
        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
    }

    return -1;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
        virtio_notify_config(vdev);
    }

    vdev->broken = true;
}
static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_init_region_cache(vdev, i);
    }
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        vdc->unrealize(dev);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    vdev->listener.name = "virtio";
    memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);

    memory_listener_unregister(&vdev->listener);
    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev);
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;

    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}

static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
    DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
    DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
                     disable_legacy_check, false),
    DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}
static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}
int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    device_class_set_props(dc, virtio_properties);
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}
static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)