/*
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;
typedef struct VRing
{
    unsigned int num;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid (false forces the next notification) */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};
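/*
 * Worked example of the layout computed by virtqueue_init() below, using
 * the struct sizes above: for a queue of num = 256 descriptors at a
 * 4096-byte-aligned guest physical address pa, the descriptor table takes
 * 256 * 16 = 4096 bytes at pa, the avail ring ends at
 * pa + 4096 + offsetof(VRingAvail, ring[256]) = pa + 4612, and with the
 * default 4096-byte alignment the used ring starts at pa + 8192.
 */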
/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 vq->vring.align);
}
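/*
 * The accessors below read and write vring fields directly in guest
 * physical memory through the ldX_phys()/stX_phys() helpers, so every
 * access observes the guest's most recent writes.
 */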
static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(&address_space_memory, pa);
}

static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(&address_space_memory, pa);
}

static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(&address_space_memory, pa);
}
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(&address_space_memory, pa);
}
static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(&address_space_memory, pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(&address_space_memory, pa, val);
}
static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(&address_space_memory, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(&address_space_memory, pa, val);
}
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(&address_space_memory,
             pa, lduw_phys(&address_space_memory, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(&address_space_memory,
             pa, lduw_phys(&address_space_memory, pa) & ~mask);
}
static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(&address_space_memory, pa, val);
}
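/*
 * Two suppression schemes exist: with VIRTIO_RING_F_EVENT_IDX negotiated,
 * the host publishes (via vring_avail_event() above) the avail index it
 * wants to be kicked at; without it, the host toggles the coarser
 * VRING_USED_F_NO_NOTIFY bit in the used ring flags.
 */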
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
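/*
 * virtqueue_fill() unmaps an element's guest buffers and stages its used
 * ring entry "idx" slots past the current used index; virtqueue_flush()
 * then publishes the new used index. Devices that complete one buffer at
 * a time can use virtqueue_push(), which combines the two.
 */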
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
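/*
 * Typical device-side processing loop (an illustrative sketch, not taken
 * from any particular device; bytes_written stands in for however much
 * the device wrote into the in_sg buffers):
 *
 *     VirtQueueElement elem;
 *     while (virtqueue_pop(vq, &elem)) {
 *         // read the request from elem.out_sg, write the reply to elem.in_sg
 *         virtqueue_push(vq, &elem, bytes_written);
 *         virtio_notify(vdev, vq);
 *     }
 */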
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}
static unsigned virtqueue_next_desc(hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}
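/*
 * Walk the available chains (including indirect tables) without consuming
 * them, summing the readable (out) and writable (in) byte counts; the walk
 * stops early once both max_in_bytes and max_out_bytes are satisfied.
 */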
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(desc_pa, i);
            } else {
                out_total += vring_desc_len(desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
    }
}
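/*
 * Pop the next available element: walk its descriptor chain, record the
 * guest addresses and lengths, and map the buffers into host memory.
 * Returns the total number of iovecs, or 0 if the queue is empty; the
 * element stays "in use" until the caller hands it back through
 * virtqueue_push() or virtqueue_fill()/virtqueue_flush().
 */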
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}
void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    trace_virtio_set_status(vdev, val);

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}
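/*
 * Config space accessors. Reads refresh the device's view through
 * get_config() before loading; writes store the new byte first and then
 * push the whole config through set_config(). Out-of-range accesses are
 * ignored (reads return (uint32_t)-1).
 */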
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}
*vdev
, int n
, int num
)
685 /* Don't allow guest to flip queue between existent and
686 * nonexistent states, or to set it to an invalid size.
688 if (!!num
!= !!vdev
->vq
[n
].vring
.num
||
689 num
> VIRTQUEUE_MAX_SIZE
||
693 vdev
->vq
[n
].vring
.num
= num
;
694 virtqueue_init(&vdev
->vq
[n
]);
697 int virtio_queue_get_num(VirtIODevice
*vdev
, int n
)
699 return vdev
->vq
[n
].vring
.num
;
int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtqueue_init(&vdev->vq[n]);
}
void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
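/*
 * Devices typically add their queues at realize time; an illustrative
 * sketch (handle_rx is a hypothetical handler, not part of this file):
 *
 *     VirtQueue *rx_vq = virtio_add_queue(vdev, 256, handle_rx);
 */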
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}
void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}
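/*
 * Worked example: with event = 5, old = 5, new = 6 the driver asked to be
 * notified once index 6 is used: (6 - 5 - 1) = 0 < (6 - 5) = 1, so notify.
 * With event = 7 instead, (6 - 7 - 1) wraps to 0xfffe, which is not below
 * 1, so the notification is suppressed; the unsigned arithmetic keeps the
 * test correct across 16-bit index wrap-around.
 */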
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}
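/*
 * Migration stream layout: transport-specific config (if any), core device
 * fields, config space, queue count, then per-queue state for each in-use
 * queue. virtio_load() below consumes the same layout.
 */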
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }
}
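/*
 * Feature negotiation: bits the transport does not offer are masked off
 * before being stored in guest_features; a negative return flags that the
 * guest requested unsupported bits.
 */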
int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *vbusk = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t supported_features = vbusk->get_features(qbus->parent);
    bool bad = (val & ~supported_features) != 0;

    val &= supported_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    uint32_t supported_features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    if (virtio_set_features(vdev, features) < 0) {
        supported_features = k->get_features(qbus->parent);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_PCI_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    int i;
    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint64_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }
    virtio_bus_device_plugged(vdev);
}
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)