/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <inttypes.h>

#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};
/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 vq->vring.align);
}
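
/*
 * Layout example (illustrative, not part of the code): with num = 128
 * descriptors at a page-aligned pa, sizeof(VRingDesc) is 16, so the
 * descriptor table occupies 2048 bytes, and the avail ring ends at
 * pa + 2048 + offsetof(VRingAvail, ring[128]) = pa + 2308; with the
 * default 4096-byte alignment, the used ring then starts at pa + 4096.
 */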
static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(&address_space_memory, pa);
}

static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(&address_space_memory, pa);
}

static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
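
/*
 * Note (per the virtio ring layout, for orientation): with
 * VIRTIO_RING_F_EVENT_IDX negotiated, used_event lives in the spare
 * uint16_t slot just past the avail ring (avail->ring[num]), and
 * avail_event in the slot past the used ring (used->ring[num]); that is
 * why vring_used_event() above and vring_avail_event() below index one
 * entry beyond vring.num.
 */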
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(&address_space_memory, pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(&address_space_memory, pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(&address_space_memory, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(&address_space_memory, pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(&address_space_memory,
             pa, lduw_phys(&address_space_memory, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(&address_space_memory,
             pa, lduw_phys(&address_space_memory, pa) & ~mask);
}

static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(&address_space_memory, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
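
/*
 * Usage sketch (illustrative; the batching pattern is the point, not any
 * particular device): a device that drains the ring from a bottom half
 * can suppress further guest kicks while it works:
 *
 *   virtio_queue_set_notification(vq, 0);
 *   while (virtqueue_pop(vq, &elem)) {
 *       ... process elem ...
 *   }
 *   virtio_queue_set_notification(vq, 1);
 */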
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(desc_pa, i);
            } else {
                out_total += vring_desc_len(desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
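
/*
 * Example (illustrative): a device that must be able to write at least a
 * 512-byte response before dequeuing anything can first check
 *
 *   if (!virtqueue_avail_bytes(vq, 512, 0)) {
 *       return;
 *   }
 *
 * i.e. at least 512 device-writable bytes and no device-readable minimum.
 */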
void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
    }
}
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}
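
/*
 * Usage sketch (illustrative; my_handle_output and the processing step
 * are hypothetical): a handle_output callback typically pairs
 * virtqueue_pop() with virtqueue_push(), where the length passed to
 * virtqueue_push() is the number of bytes written into the in_sg
 * (device-writable) buffers:
 *
 *   static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *   {
 *       VirtQueueElement elem;
 *
 *       while (virtqueue_pop(vq, &elem)) {
 *           size_t written = 0;
 *           ... read the request from elem.out_sg, write the reply into
 *           ... elem.in_sg, accumulating the reply size in written ...
 *           virtqueue_push(vq, &elem, written);
 *       }
 *       virtio_notify(vdev, vq);
 *   }
 */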
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
}
bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
    virtqueue_init(&vdev->vq[n]);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}
int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtqueue_init(&vdev->vq[n]);
}
void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
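
/*
 * Example (illustrative; the 256-entry size and my_handle_output are
 * hypothetical): a device realize function allocates its queues once,
 * e.g.
 *
 *   vq = virtio_add_queue(vdev, 256, my_handle_output);
 *
 * queue_size must not exceed VIRTQUEUE_MAX_SIZE, or virtio_add_queue()
 * aborts.
 */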
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}
void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}
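
/*
 * Worked example (illustrative): with old = 10, new = 12 and event = 10,
 * (uint16_t)(new - event - 1) == 1 < (uint16_t)(new - old) == 2, so we
 * notify: the driver asked to be woken once entry 10 was used, and we
 * just crossed it. With event = 12 the left side wraps to 0xffff and the
 * comparison fails, so the notification is suppressed. The unsigned
 * 16-bit arithmetic keeps this correct across index wraparound.
 */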
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the feature was acknowledged) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    return vdev->device_endian != virtio_default_endian();
}

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_virtio_device_endian,
            .needed = &virtio_device_endian_needed
        },
        { 0 }
    }
};
*vdev
, QEMUFile
*f
)
908 BusState
*qbus
= qdev_get_parent_bus(DEVICE(vdev
));
909 VirtioBusClass
*k
= VIRTIO_BUS_GET_CLASS(qbus
);
910 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_GET_CLASS(vdev
);
913 if (k
->save_config
) {
914 k
->save_config(qbus
->parent
, f
);
917 qemu_put_8s(f
, &vdev
->status
);
918 qemu_put_8s(f
, &vdev
->isr
);
919 qemu_put_be16s(f
, &vdev
->queue_sel
);
920 qemu_put_be32s(f
, &vdev
->guest_features
);
921 qemu_put_be32(f
, vdev
->config_len
);
922 qemu_put_buffer(f
, vdev
->config
, vdev
->config_len
);
924 for (i
= 0; i
< VIRTIO_PCI_QUEUE_MAX
; i
++) {
925 if (vdev
->vq
[i
].vring
.num
== 0)
931 for (i
= 0; i
< VIRTIO_PCI_QUEUE_MAX
; i
++) {
932 if (vdev
->vq
[i
].vring
.num
== 0)
935 qemu_put_be32(f
, vdev
->vq
[i
].vring
.num
);
936 if (k
->has_variable_vring_alignment
) {
937 qemu_put_be32(f
, vdev
->vq
[i
].vring
.align
);
939 qemu_put_be64(f
, vdev
->vq
[i
].pa
);
940 qemu_put_be16s(f
, &vdev
->vq
[i
].last_avail_idx
);
942 k
->save_queue(qbus
->parent
, i
, f
);
946 if (vdc
->save
!= NULL
) {
951 vmstate_save_state(f
, &vmstate_virtio
, vdev
);
int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *vbusk = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t supported_features = vbusk->get_features(qbus->parent);
    bool bad = (val & ~supported_features) != 0;

    val &= supported_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    uint32_t supported_features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    if (virtio_set_features(vdev, features) < 0) {
        supported_features = k->get_features(qbus->parent);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_PCI_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].pa) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        }
    }

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    int i;
    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* Avail ring entries are 16-bit indices, not 64-bit values. */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }
    virtio_bus_device_plugged(vdev);
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)