/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/virtio/virtqueue.c,v 1.2 2012/04/14 05:48:04 grehan Exp $
 */
/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <sys/serialize.h>

#include <machine/cpu.h>
#include <machine/atomic.h>

#include "virtqueue.h"
#include "virtio_ring.h"

#include "virtio_bus_if.h"
struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;

#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;

	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};
/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END	32768
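
/*
 * Illustrative note (added, not from the original source): ring sizes
 * are powers of two no larger than 2^15, so valid descriptor indices
 * run from 0 to 32767. The terminator 32768 therefore can never alias
 * a live slot; e.g. a 256-entry ring only ever uses indices 0..255.
 */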
#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp), ("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)
static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);
uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1ULL << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}
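
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * driver negotiating features would strip transport bits it does not
 * handle itself, e.g.
 *
 *	features = virtqueue_filter_features(host_features &
 *	    VTFOO_SUPPORTED_FEATURES);
 *
 * VTFOO_SUPPORTED_FEATURES is an assumed driver-defined mask. Device
 * bits below VIRTIO_TRANSPORT_F_START pass through, and of the
 * transport range only INDIRECT_DESC and EVENT_IDX are kept.
 */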
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
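
/*
 * Illustrative allocation sketch (hypothetical caller, not part of this
 * file), assuming the VQ_ALLOC_INFO_INIT() helper from virtqueue.h:
 *
 *	struct vq_alloc_info info;
 *	struct virtqueue *vq;
 *
 *	VQ_ALLOC_INFO_INIT(&info, 0, vtfoo_vq_intr, sc, &vq,
 *	    "%s request", device_get_nameunit(dev));
 *	error = virtqueue_alloc(dev, 0, vq_size, PAGE_SIZE,
 *	    BUS_SPACE_MAXADDR, &info, &vq);
 *
 * vtfoo_vq_intr, sc and vq_size are assumed driver-side names; the
 * queue size and alignment normally come from the transport layer.
 */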
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		device_printf(dev, "virtqueue %d (%s) requested "
		    "indirect descriptors but not negotiated\n",
		    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = contigmalloc(size, M_DEVBUF, M_WAITOK,
		    0, BUS_SPACE_MAXADDR, 16, 0);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}
static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		contigfree(dxp->indirect, vq->vq_indirect_mem_size, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}
static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}
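
/*
 * Illustrative note (added, not from the original source): with
 * vq_max_indirect_size = 4 the freshly initialized table links as
 *
 *	indirect[0].next = 1, indirect[1].next = 2, indirect[2].next = 3,
 *	indirect[3].next = VQ_RING_DESC_CHAIN_END
 *
 * so vq_ring_enqueue_segments() can walk an indirect table exactly like
 * the main descriptor table.
 */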
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning, '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}
void
virtqueue_free(struct virtqueue *vq)
{
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	kfree(vq, M_DEVBUF);
}
vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{
	return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{
	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{
	return (vq->vq_free_cnt == 0);
}
void
virtqueue_notify(struct virtqueue *vq, lwkt_serialize_t interlock)
{
	/* Ensure updated avail->idx is visible to host. */
	cpu_mfence();

	if (vq_ring_must_notify_host(vq)) {
		if (interlock != NULL)
			lwkt_serialize_exit(interlock);
		vq_ring_notify_host(vq);
		if (interlock != NULL)
			lwkt_serialize_enter(interlock);
	}
	vq->vq_queued_cnt = 0;
}
int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;
	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}
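
/*
 * Illustrative note (added, not from the original source): the
 * subtraction is performed modulo 2^16 on purpose, so the count stays
 * correct across index wrap; e.g. used->idx = 3 with vq_used_cons_idx
 * = 65533 yields (uint16_t)(3 - 65533) = 6 pending entries.
 */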
int
virtqueue_intr(struct virtqueue *vq)
{
	if (vq->vq_intrhand == NULL ||
	    vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	vq->vq_intrhand(vq->vq_intrhand_arg);

	return (1);
}
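
/*
 * Illustrative interrupt-handler pattern (hypothetical driver code, not
 * part of this file):
 *
 *	again:
 *		while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
 *			vtfoo_complete(sc, cookie, len);
 *		if (virtqueue_enable_intr(vq) != 0) {
 *			virtqueue_disable_intr(vq);
 *			goto again;
 *		}
 *
 * vtfoo_complete() and sc are assumed names. The retry closes the race
 * where the host posts more used entries between the final dequeue and
 * re-enabling interrupts.
 */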
/*
 * Enable interrupts on a given virtqueue. Returns 1 if there are
 * additional entries to process on the virtqueue after we return.
 */
int
virtqueue_enable_intr(struct virtqueue *vq)
{
	/*
	 * Enable interrupts, making sure we get the latest
	 * index of what's already been consumed.
	 */
	vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx;

	cpu_mfence();

	/*
	 * Additional items may have been consumed since we last checked
	 * and enabled interrupts above. Let our caller know so it
	 * processes the new entries.
	 */
	if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
		return (1);

	return (0);
}
int
virtqueue_postpone_intr(struct virtqueue *vq)
{
	uint16_t ndesc;

	/*
	 * Postpone until at least half of the available descriptors
	 * have been consumed.
	 *
	 * XXX Adaptive factor? (Linux uses 3/4)
	 */
	ndesc = (uint16_t)(vq->vq_ring.avail->idx - vq->vq_used_cons_idx) / 2;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	cpu_mfence();

	/*
	 * Enough items may have already been consumed to meet our
	 * threshold since we last checked. Let our caller know so
	 * it processes the new entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}
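
/*
 * Illustrative note (added, not from the original source): with
 * avail->idx = 100 and vq_used_cons_idx = 60, ndesc = (100 - 60) / 2 =
 * 20, so under EVENT_IDX the used_event index becomes 80 and the host
 * is asked to interrupt only after consuming 20 more entries.
 */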
void
virtqueue_disable_intr(struct virtqueue *vq)
{
	/*
	 * Note this is only considered a hint to the host.
	 */
	if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0)
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
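
/*
 * Illustrative enqueue sketch (hypothetical driver code, not part of
 * this file): queue a device-readable request header followed by a
 * device-writable status byte, then notify the host:
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));
 *	sglist_append(&sg, &req->status, sizeof(req->status));
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(vq, NULL);
 *
 * req and its fields are assumed driver-side names.
 */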
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	cpu_mfence();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	/* We only poll the virtqueue when dumping to virtio-blk */
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		cpu_pause();

	return (cookie);
}
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
void
virtqueue_dump(struct virtqueue *vq)
{
	if (vq == NULL)
		return;

	kprintf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}
static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}
static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	cpu_mfence();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify() for debugging. */
	vq->vq_queued_cnt++;
}
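
/*
 * Illustrative note (added, not from the original source): avail->idx
 * is free-running; only the slot index is masked. With a 256-entry
 * ring and avail->idx = 258, the chain head lands in avail->ring[2]
 * and avail->idx becomes 259 after the increment.
 */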
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}
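
/*
 * Illustrative note (added, not from the original source): for
 * readable = 2 and writable = 1 starting at head_idx = 5 with a free
 * chain 5 -> 6 -> 7, the resulting descriptors are
 *
 *	desc[5]: seg 0, flags = NEXT
 *	desc[6]: seg 1, flags = NEXT
 *	desc[7]: seg 2, flags = WRITE
 *
 * and the function returns desc[7].next, the new head of the free
 * chain.
 */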
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{
	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
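
/*
 * Illustrative note (added, not from the original source):
 * vring_need_event() from virtio_ring.h is defined as
 *
 *	(uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx)
 *
 * e.g. with prev_idx = 10, new_idx = 14 and event_idx = 12, the batch
 * crossed the host's event index, so a notification is required.
 */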
static void
vq_ring_notify_host(struct virtqueue *vq)
{
	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}
	VQASSERT(vq, dxp->ndescs == 0, "failed to free entire desc chain");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}