hw/virtio.c
/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"
/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;
struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};
/* virt queue functions */
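/* A virtqueue occupies one contiguous guest-physical region laid out as in
 * the legacy virtio spec: the descriptor table at pa, the avail ring
 * immediately after it, and the used ring aligned up to
 * VIRTIO_PCI_VRING_ALIGN.  Worked example for num = 256: the descriptor
 * table takes 256 * 16 = 4096 bytes, the avail ring starts at pa + 4096
 * and takes 4 + 2 * 256 = 516 bytes, so the used ring lands at pa + 8192
 * after alignment. */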
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}
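/* The rings live in guest RAM, so every field access above and below goes
 * through the ld*_phys()/st*_phys() helpers rather than a cached host
 * mapping. */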
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}
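/* VRING_USED_F_NO_NOTIFY is advisory: setting it in the used ring's flags
 * asks the guest not to kick the host after adding buffers.  Devices toggle
 * it via virtio_queue_set_notification(), typically while they are already
 * draining the queue. */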
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
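/* virtqueue_fill() only stages a completed element in the used ring, at
 * offset idx past the current used index; nothing becomes visible to the
 * guest until virtqueue_flush() publishes the new index.  A device can
 * therefore complete a batch with several fill calls (idx = 0, 1, ...)
 * followed by one flush(count). */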
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available", head);
        exit(1);
    }

    return head;
}
static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u", next);
        exit(1);
    }

    return next;
}
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            /* Read the table address from the head descriptor before i is
             * reset to index into the indirect table. */
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}
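/* A device can use this to check whether enough writable (in_) or readable
 * (out_) descriptor space is queued before popping anything; virtio-net,
 * for instance, checks its rx queue this way before accepting a packet. */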
void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    target_phys_addr_t len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }
    }
}
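/* cpu_physical_memory_map() can fail or return a shorter mapping when the
 * target address is not ordinary RAM (e.g. MMIO); since virtio buffers
 * must reference directly mappable guest memory, either case is fatal
 * here. */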
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
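/* Typical device-side flow (an illustrative sketch only; my_handle_output
 * and process_element are hypothetical names, not part of this file): the
 * handle_output callback registered with virtio_add_queue() drains the
 * avail ring and completes each element:
 *
 *   static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *   {
 *       VirtQueueElement elem;
 *
 *       while (virtqueue_pop(vq, &elem)) {
 *           // device-specific work; returns bytes written to in_sg
 *           size_t len = process_element(&elem);
 *           virtqueue_push(vq, &elem, len);
 *           virtio_notify(vdev, vq);
 *       }
 *   }
 */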
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}
void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}
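/* Each read helper above refreshes the config snapshot via get_config()
 * first, so the guest always sees current device state; the write helpers
 * below mirror this by pushing the updated bytes back through
 * set_config(). */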
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
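/* Illustrative use (device code, not in this file): during init a device
 * grabs its queues, e.g.
 *
 *   s->vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 * where 128 is the ring size and my_handle_output is the (hypothetical)
 * callback that virtio_queue_notify() will invoke on a guest kick. */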
void virtio_irq(VirtQueue *vq)
{
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
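/* Interrupt suppression: the guest sets VRING_AVAIL_F_NO_INTERRUPT when it
 * does not want completion interrupts.  We honour it unless the guest also
 * negotiated VIRTIO_F_NOTIFY_ON_EMPTY and this completion emptied the queue
 * (nothing in flight and no new avail entries), in which case we notify
 * anyway. */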
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty (when feature acknowledge) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}
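/* virtio_load() below is the mirror image: it restores the common fields,
 * rejects feature bits the binding does not support, and re-derives the
 * vring layout from each queue's saved base address via virtqueue_init(). */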
int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                features, supported_features);
        return -1;
    }
    if (vdev->set_features)
        vdev->set_features(vdev, features);
    vdev->guest_features = features;
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    return vdev;
}
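/* Illustrative use (from a device model; the exact call sites live
 * elsewhere):
 *
 *   s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
 *                                         sizeof(struct virtio_blk_config),
 *                                         sizeof(VirtIOBlock));
 *
 * struct_size covers the device's own state with the VirtIODevice embedded
 * at the start, which is what makes the cast valid. */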
void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}
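/* The remaining accessors expose vring geometry, saved indices and the
 * per-queue event notifiers to external ring consumers such as vhost,
 * which processes the queues outside of QEMU proper. */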
target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}
target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* Avail ring entries are 16-bit descriptor indices. */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}
target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}