[qemu.git] / hw / virtio.c
/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <inttypes.h>

#include "trace.h"
#include "qemu-error.h"
#include "virtio.h"
/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;
struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};
/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
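/*
 * Worked example (illustrative, not part of the original source): for a
 * queue of num = 256 descriptors based at pa = 0x10000:
 *
 *   desc  = 0x10000                                  (256 * 16 = 4096 bytes)
 *   avail = 0x10000 + 4096 = 0x11000                 (4 + 2 * 256 = 516 bytes)
 *   used  = vring_align(0x11000 + 516, 4096) = 0x12000
 *
 * i.e. the used ring starts on the next 4K boundary after the avail ring,
 * giving the legacy virtio ring layout.
 */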
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(pa, val);
}
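/*
 * Note: with VIRTIO_RING_F_EVENT_IDX negotiated, the guest's used_event
 * word occupies the slot one past the end of the avail ring (read by
 * vring_used_event() above via vring_avail_ring(vq, vq->vring.num)), and
 * the device's avail_event word occupies the slot one past the end of the
 * used ring (written by vring_avail_event() above).
 */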
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}
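/*
 * Illustrative example of the check above: with old = 10 and count = 3,
 * new = 13 and the entries just published occupy (10, 13].  If
 * signalled_used = 11, then (new - signalled_used) = 2 is less than
 * (new - old) = 3, so the last signalled value now lies inside the window
 * just handed to the guest and can no longer be used for notification
 * suppression; signalled_used_valid is therefore cleared.  The 16-bit
 * arithmetic keeps the comparison correct across index wrap-around.
 */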
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}
static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table; read the table
             * address from descriptor i before resetting i */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}
void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
    size_t num_sg, int is_write)
{
    unsigned int i;
    target_phys_addr_t len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}
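/*
 * Typical use by a device model (a hedged sketch; the handler name and
 * reply_len are hypothetical, not code from this file):
 *
 *   static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *   {
 *       VirtQueueElement elem;
 *
 *       while (virtqueue_pop(vq, &elem)) {
 *           // read the request from elem.out_sg, write the reply into
 *           // elem.in_sg, then complete the buffer chain
 *           virtqueue_push(vq, &elem, reply_len);
 *           virtio_notify(vdev, vq);
 *       }
 *   }
 */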
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    trace_virtio_set_status(vdev, val);

    if (vdev->set_status) {
        vdev->set_status(vdev, val);
    }
    vdev->status = val;
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    virtio_set_status(vdev, 0);

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
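/*
 * Illustrative registration (hypothetical device code, not from this file):
 *
 *   s->vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 * Slots are handed out in order, so a device's queue indexes follow the
 * order of its virtio_add_queue() calls; queue_size must not exceed
 * VIRTQUEUE_MAX_SIZE.
 */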
void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}
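/*
 * Worked example (illustrative): suppose old = 5, new = 8 and the guest
 * set event = 6.  Then (uint16_t)(new - event - 1) = 1 is less than
 * (uint16_t)(new - old) = 3: the index stepped over the event index, so
 * an event is due.  With event = 2 instead, (uint16_t)(8 - 2 - 1) = 5 is
 * not less than 3 and the notification is suppressed.  The unsigned
 * 16-bit subtraction keeps the test correct across wrap-around.
 */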
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* Always notify when queue is empty (if the guest acknowledged
     * the VIRTIO_F_NOTIFY_ON_EMPTY feature) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    if (vdev->set_features)
        vdev->set_features(vdev, features);
    vdev->guest_features = features;
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    if (vdev->config)
        g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev);
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (vdev->binding->vmstate_change) {
        vdev->binding->vmstate_change(vdev->binding_opaque, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = g_malloc0(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = g_malloc0(config_size);
    else
        vdev->config = NULL;

    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change, vdev);

    return vdev;
}
void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}
target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}
target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}
target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* avail ring entries are 16-bit descriptor indexes */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}
target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}