/*
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;
struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Is the signalled_used value valid? */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}
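/*
 * Worked example: for a 256-entry queue whose descriptor table starts at a
 * 4096-aligned address, the layout computed above is
 *
 *   desc table: 256 * sizeof(VRingDesc)         = 256 * 16  = 4096 bytes
 *   avail ring: offsetof(VRingAvail, ring[256]) = 4 + 2*256 =  516 bytes
 *   used ring:  starts at vring_align(desc + 4096 + 516, 4096)
 *             = desc + 8192 when align is VIRTIO_PCI_VRING_ALIGN.
 */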
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
                        (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
    }
}

void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
                       unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_unmap_sg(vq, elem, len);
}
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    trace_virtqueue_flush(vq, count);

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
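/*
 * Illustrative sketch (not part of the original file): a typical device
 * handle_output callback drains the avail ring with virtqueue_pop(),
 * processes each element, and completes it with virtqueue_push() followed
 * by virtio_notify().  The function name and the (empty) processing are
 * hypothetical.
 */
static void G_GNUC_UNUSED example_handle_output(VirtIODevice *vdev,
                                                VirtQueue *vq)
{
    VirtQueueElement *elem;

    while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
        /* A real device would consume elem->out_sg here and write any
         * response into elem->in_sg, reporting the bytes written. */
        virtqueue_push(vq, elem, 0);
        g_free(elem);
    }
    virtio_notify(vdev, vq);
}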
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}
static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                         hwaddr desc_pa, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off end of descriptors. */
    next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    vring_desc_read(vdev, desc, desc_pa, next);
    return next;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
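/*
 * Illustrative sketch (not part of the original file): a device that needs
 * a minimum amount of guest-writable buffer space can probe for it before
 * popping anything; the 64-byte requirement here is hypothetical.
 */
static bool G_GNUC_UNUSED example_room_for_reply(VirtQueue *vq)
{
    /* Want 64 guest-writable (device-to-driver) bytes, no readable ones. */
    return virtqueue_avail_bytes(vq, 64, 0);
}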
static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            error_report("virtio: too many write descriptors in indirect table");
            exit(1);
        }

        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    *p_num_sg = num_sg;
}
static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
                                unsigned int *num_sg, unsigned int max_size,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}
void virtqueue_map(VirtQueueElement *elem)
{
    virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
                        VIRTQUEUE_MAX_SIZE, 1);
    virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
                        VIRTQUEUE_MAX_SIZE, 0);
}
void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
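/*
 * Worked example: with sz == sizeof(VirtQueueElement), out_num == 1 and
 * in_num == 2, the single allocation above holds, in order, the element
 * header, two in_addr entries, one out_addr entry, two in_sg iovecs and
 * one out_sg iovec, each group placed at a QEMU_ALIGN_UP'd offset so the
 * element's pointer members can be aimed into its own allocation.
 */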
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;

    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        if (desc.flags & VRING_DESC_F_WRITE) {
            virtqueue_map_desc(&in_num, addr + out_num, iov + out_num,
                               VIRTQUEUE_MAX_SIZE - out_num, true, desc.addr, desc.len);
        } else {
            if (in_num) {
                error_report("Incorrect order for descriptors");
                exit(1);
            }
            virtqueue_map_desc(&out_num, addr, iov,
                               VIRTQUEUE_MAX_SIZE, false, desc.addr, desc.len);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;
}
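/*
 * Illustrative sketch (not part of the original file): devices usually
 * embed VirtQueueElement at the start of their own request struct and
 * pass the enclosing size, so virtqueue_pop() allocates the whole thing
 * in one go.  The struct and field below are hypothetical.
 */
typedef struct ExampleRequest {
    VirtQueueElement elem;      /* must be the first member */
    uint32_t status;            /* hypothetical per-request state */
} ExampleRequest;

static G_GNUC_UNUSED ExampleRequest *example_pop(VirtQueue *vq)
{
    return virtqueue_pop(vq, sizeof(ExampleRequest));
}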
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld
{
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map. */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map. */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading. Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above. */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
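/*
 * Illustrative sketch (not part of the original file): a device's
 * migration code pairs these helpers, writing each in-flight element on
 * the source and reconstructing (and re-mapping, via virtqueue_map())
 * it on the destination.  The function names are hypothetical.
 */
static void G_GNUC_UNUSED example_save_request(QEMUFile *f,
                                               VirtQueueElement *elem)
{
    qemu_put_virtqueue_element(f, elem);
}

static G_GNUC_UNUSED VirtQueueElement *example_load_request(QEMUFile *f)
{
    /* qemu_get_virtqueue_element() calls virtqueue_map() internally. */
    return qemu_get_virtqueue_element(f, sizeof(VirtQueueElement));
}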
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}
bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* virtio_reset at realize time */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}
void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
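/*
 * Illustrative sketch (not part of the original file): the k->get_config
 * callback used by the accessors above typically serializes a
 * device-specific struct into vdev->config.  The struct and field are
 * hypothetical.
 */
struct ExampleConfig {
    uint32_t knob;              /* hypothetical device parameter */
} QEMU_PACKED;

static void G_GNUC_UNUSED example_get_config(VirtIODevice *vdev,
                                             uint8_t *config)
{
    struct ExampleConfig cfg = { .knob = 42 };

    memcpy(config, &cfg, sizeof(cfg));
}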
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}
void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
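/*
 * Illustrative sketch (not part of the original file): a minimal device
 * setup path pairs virtio_init() with virtio_add_queue(), reusing the
 * example_handle_output() sketch above.  The name, device ID, queue size
 * and empty config space are hypothetical.
 */
static void G_GNUC_UNUSED example_device_setup(VirtIODevice *vdev)
{
    virtio_init(vdev, "example", 0xffff, 0);    /* no config space */
    virtio_add_queue(vdev, 128, example_handle_output);
}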
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
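/*
 * Worked example: vring_need_event(event_idx, new, old) is
 * (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old).  If the guest
 * set used_event = 5 and used_idx moved from old = 4 to new = 6, then
 * 0 < 2 holds and the guest is notified; had it only moved to new = 5,
 * 65535 < 1 fails and the interrupt is suppressed.
 */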
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_extra_state,
        NULL
    }
};
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
        }
    }

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)
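/*
 * Illustrative sketch (not part of the original file): concrete devices
 * register a subtype of TYPE_VIRTIO_DEVICE following the same pattern;
 * the type name here is hypothetical.
 */
static G_GNUC_UNUSED const TypeInfo example_device_info = {
    .name = "example-virtio-device",
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIODevice),
};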