/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "qemu-error.h"
#include "virtio.h"
#include "qemu-barrier.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
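
/* Lay out the three parts of a legacy virtqueue from its base guest-physical
 * address: the descriptor table first, then the avail ring, then the used
 * ring, aligned up to VIRTIO_PCI_VRING_ALIGN (one x86 page). */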
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

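/* The helpers below read and write individual vring fields directly in
 * guest memory with the ld*_phys()/st*_phys() accessors; the only per-queue
 * state kept on the host side lives in struct VirtQueue above. */
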
static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

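/* With VIRTIO_RING_F_EVENT_IDX, the guest publishes used_event in the slot
 * just past the end of the avail ring (avail->ring[num]). */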
static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

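/* Symmetrically, the device publishes avail_event just past the end of the
 * used ring (used->ring[num]); the write is skipped while notification is
 * disabled for this queue. */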
static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(pa, val);
}

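/* Enable or disable guest->host notifications for vq.  With EVENT_IDX the
 * avail event index does the throttling; otherwise the NO_NOTIFY flag in
 * the used ring is cleared or set. */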
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

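/* Completion happens in two steps: virtqueue_fill() unmaps an element and
 * writes it into the used ring at 'idx' slots past the current used index;
 * virtqueue_flush() then publishes a whole batch by advancing used->idx. */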
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Write the element into the next entry of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

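/* Walk the avail ring from last_avail_idx without consuming anything,
 * totalling readable (out) and writable (in) descriptor bytes; the walk
 * stops early once both max_in_bytes and max_out_bytes are satisfied. */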
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(desc_pa, i);
            } else {
                out_total += vring_desc_len(desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}

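/* Pop the next available element: read the head descriptor chain (following
 * an indirect table if present), gather the readable and writable guest
 * buffers into elem->out_sg/elem->in_sg, and map them into host memory.
 * Returns the total number of segments, or 0 if the ring is empty. */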
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    trace_virtio_set_status(vdev, val);

    if (vdev->set_status) {
        vdev->set_status(vdev, val);
    }
    vdev->status = val;
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    virtio_set_status(vdev, 0);

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stb_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stw_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stl_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

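/* Allocate the first unused queue slot (vring.num == 0 marks a free slot)
 * and attach the device's handler for guest kicks. */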
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
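    /* Wrap-safe check that 'event' lies in the half-open window (old, new]
     * modulo 2^16.  Example: old = 0xfffe, new = 0x0001, event = 0xffff:
     * (uint16_t)(new - event - 1) == 1 and (uint16_t)(new - old) == 3,
     * so an event is needed. */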
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}

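/* Decide whether the guest needs an interrupt for vq: always when the queue
 * just went empty and NOTIFY_ON_EMPTY was negotiated; per the NO_INTERRUPT
 * avail flag without EVENT_IDX; otherwise per the guest's used_event index. */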
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the guest acknowledged the
     * VIRTIO_F_NOTIFY_ON_EMPTY feature) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

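/* Migration stream layout, mirrored exactly by virtio_load(): optional
 * binding config, then status/isr/queue_sel/guest_features, the raw config
 * space, the number of in-use queues, and per-queue num/pa/last_avail_idx
 * plus optional binding state. */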
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);
    bool bad = (val & ~supported_features) != 0;

    val &= supported_features;
    if (vdev->set_features) {
        vdev->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features;

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (virtio_set_features(vdev, features) < 0) {
        supported_features = vdev->binding->get_features(vdev->binding_opaque);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev);
}

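/* Propagate VM run-state changes to the device: when resuming, re-assert
 * status before the binding is told the backend is running; when stopping,
 * only after the binding has been quiesced. */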
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (vdev->binding->vmstate_change) {
        vdev->binding->vmstate_change(vdev->binding_opaque, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = g_malloc0(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = g_malloc0(config_size);
    else
        vdev->config = NULL;

    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

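/* Event notifier glue: the guest notifier feeds virtio_irq() and the host
 * notifier feeds virtio_queue_notify_vq(), so a binding can drive interrupts
 * and queue kicks from file-descriptor events instead of explicit calls. */
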
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}