/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013  Chris Torek <torek @ torek net>
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <dev/virtio/pci/virtio_pci_legacy_var.h>

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"
/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by <https://www.google.com/#output=search&q=virtio+spec>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this to convert.
 */
#define	DEV_SOFTC(vs) ((void *)(vs))
/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
		void *dev_softc, struct pci_devinst *pi,
		struct vqueue_info *queues)
{
	int i;

	/* vs and dev_softc addresses must match */
	assert((void *)vs == dev_softc);
	vs->vs_vc = vc;
	vs->vs_pi = pi;
	pi->pi_arg = vs;

	vs->vs_queues = queues;
	for (i = 0; i < vc->vc_nvq; i++) {
		queues[i].vq_vs = vs;
		queues[i].vq_num = i;
	}
}
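/*
 * Typical usage (an illustrative sketch, not code from this file):
 * a device model whose softc embeds a struct virtio_softc as its
 * first member links up from its PCI init routine, e.g.
 *
 *	vi_softc_linkup(&sc->vsc_vs, &my_vi_consts, sc, pi, sc->vsc_queues);
 *	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;
 *	vi_set_io_bar(&sc->vsc_vs, 0);
 *
 * where "my_vi_consts", "vsc_vs", "vsc_mtx" and "vsc_queues" are
 * hypothetical names for the device's own structures.
 */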
/*
 * Reset device (device-wide).  This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
	struct vqueue_info *vq;
	int i, nvq;

	if (vs->vs_mtx)
		assert(pthread_mutex_isowned_np(vs->vs_mtx));

	nvq = vs->vs_vc->vc_nvq;
	for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
		vq->vq_flags = 0;
		vq->vq_last_avail = 0;
		vq->vq_next_used = 0;
		vq->vq_save_used = 0;
		vq->vq_pfn = 0;
		vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	vs->vs_negotiated_caps = 0;
	vs->vs_curq = 0;
	/* vs->vs_status = 0; -- redundant */
	if (vs->vs_isr)
		pci_lintr_deassert(vs->vs_pi);
	vs->vs_isr = 0;
	vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}
/*
 * Set I/O BAR (usually 0) to map PCI config registers.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
	size_t size;

	/*
	 * ??? should we use VIRTIO_PCI_CONFIG_OFF(0) if MSI-X is disabled?
	 * Existing code did not...
	 */
	size = VIRTIO_PCI_CONFIG_OFF(1) + vs->vs_vc->vc_cfgsize;
	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}
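/*
 * Worked size example, per the legacy virtio register layout this
 * file assumes: VIRTIO_PCI_CONFIG_OFF(1) is 24 -- the 20-byte common
 * header plus 4 bytes of MSI-X vector registers -- so a device with
 * a 12-byte device-specific config region gets a 36-byte I/O BAR.
 */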
/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vec.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		VS_LOCK(vs);
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		VS_UNLOCK(vs);
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else
		vs->vs_flags &= ~VIRTIO_USE_MSIX;

	/* Only 1 MSI vector for bhyve */
	pci_emul_add_msicap(vs->vs_pi, 1);

	/* Legacy interrupts are mandatory for virtio devices */
	pci_lintr_request(vs->vs_pi);

	return (0);
}
/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
static void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size_aligned(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct vring_desc *)base;
	base += vq->vq_qsize * sizeof(struct vring_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
	vq->vq_next_used = 0;
	vq->vq_save_used = 0;
}
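/*
 * Worked layout example for a queue size of 256:
 *   descriptors:  256 * 16 = 4096 bytes at offset 0
 *   avail ring:   (2 + 256 + 1) * 2 = 518 bytes at offset 4096
 *   (4614 rounds up to the next VRING_ALIGN (4096-byte) boundary)
 *   used ring:    starts at offset 8192
 */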
/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, struct vring_desc *vd, struct vmctx *ctx, struct iovec *iov,
    int n_iov, struct vi_req *reqp)
{
	if (i >= n_iov)
		return;
	iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
	iov[i].iov_len = vd->len;
	if ((vd->flags & VRING_DESC_F_WRITE) == 0)
		reqp->readable++;
	else
		reqp->writable++;
}

#define	VQ_MAX_DESCRIPTORS	512	/* see below */
/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request.  If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request.  This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1.  Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more.  We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the "flags" and "next" field of each
 * descriptor and tells you how many are involved.  Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size).  If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1.  If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq, struct iovec *iov, int niov,
	    struct vi_req *reqp)
{
	int i;
	u_int ndesc, n_indir;
	u_int idx, next;
	struct vi_req req;
	struct vring_desc *vdir, *vindir, *vp;
	struct vmctx *ctx;
	struct virtio_softc *vs;
	const char *name;

	vs = vq->vq_vs;
	name = vs->vs_vc->vc_name;
	memset(&req, 0, sizeof(req));

	/*
	 * Note: it's the responsibility of the guest not to
	 * update vq->vq_avail->idx until all of the descriptors
	 * the guest has written are valid (including all their
	 * "next" fields and "flags").
	 *
	 * Compute (vq_avail->idx - last_avail) in integers mod 2**16.  This is
	 * the number of descriptors the device has made available
	 * since the last time we updated vq->vq_last_avail.
	 *
	 * We just need to do the subtraction as an unsigned int,
	 * then trim off excess bits.
	 */
	idx = vq->vq_last_avail;
	ndesc = (uint16_t)((u_int)vq->vq_avail->idx - idx);
	if (ndesc == 0)
		return (0);
	if (ndesc > vq->vq_qsize) {
		/* XXX need better way to diagnose issues */
		EPRINTLN(
		    "%s: ndesc (%u) out of range, driver confused?",
		    name, (u_int)ndesc);
		return (-1);
	}

	/*
	 * Now count/parse "involved" descriptors starting from
	 * the head of the chain.
	 *
	 * To prevent loops, we could be more complicated and
	 * check whether we're re-visiting a previously visited
	 * index, but we just abort if the count gets excessive.
	 */
	ctx = vs->vs_pi->pi_vmctx;
	req.idx = next = vq->vq_avail->ring[idx & (vq->vq_qsize - 1)];
	vq->vq_last_avail++;
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
		if (next >= vq->vq_qsize) {
			EPRINTLN(
			    "%s: descriptor index %u out of range, "
			    "driver confused?",
			    name, next);
			return (-1);
		}
		vdir = &vq->vq_desc[next];
		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(i, vdir, ctx, iov, niov, &req);
			i++;
		} else if ((vs->vs_vc->vc_hv_caps &
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			EPRINTLN(
			    "%s: descriptor has forbidden INDIRECT flag, "
			    "driver confused?",
			    name);
			return (-1);
		} else {
			n_indir = vdir->len / 16;
			if ((vdir->len & 0xf) || n_indir == 0) {
				EPRINTLN(
				    "%s: invalid indir len 0x%x, "
				    "driver confused?",
				    name, (u_int)vdir->len);
				return (-1);
			}
			vindir = paddr_guest2host(ctx,
			    vdir->addr, vdir->len);
			/*
			 * Indirects start at the 0th, then follow
			 * their own embedded "next"s until those run
			 * out.  Each one's indirect flag must be off
			 * (we don't really have to check, could just
			 * ignore errors...).
			 */
			next = 0;
			for (;;) {
				vp = &vindir[next];
				if (vp->flags & VRING_DESC_F_INDIRECT) {
					EPRINTLN(
					    "%s: indirect desc has INDIR flag,"
					    " driver confused?",
					    name);
					return (-1);
				}
				_vq_record(i, vp, ctx, iov, niov, &req);
				if (++i > VQ_MAX_DESCRIPTORS)
					goto loopy;
				if ((vp->flags & VRING_DESC_F_NEXT) == 0)
					break;
				next = vp->next;
				if (next >= n_indir) {
					EPRINTLN(
					    "%s: invalid next %u > %u, "
					    "driver confused?",
					    name, (u_int)next, n_indir);
					return (-1);
				}
			}
		}
		if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
			goto done;
	}

loopy:
	EPRINTLN(
	    "%s: descriptor loop? count > %d - driver confused?",
	    name, i);
	return (-1);

done:
	*reqp = req;
	return (i);
}
/*
 * Return the first n_chain request chains back to the available queue.
 *
 * (These chains are the ones you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_retchains(struct vqueue_info *vq, uint16_t n_chains)
{

	vq->vq_last_avail -= n_chains;
}
void
vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	struct vring_used *vuh;
	struct vring_used_elem *vue;
	uint16_t mask;

	/*
	 * Notes:
	 *  - mask is N-1 where N is a power of 2 so computes x % N
	 *  - vuh points to the "used" data shared with guest
	 *  - vue points to the "used" ring entry we want to update
	 */
	mask = vq->vq_qsize - 1;
	vuh = vq->vq_used;

	vue = &vuh->ring[vq->vq_next_used++ & mask];
	vue->id = idx;
	vue->len = iolen;
}
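/*
 * Example of the mask trick: with a queue size of 64 the mask is 63
 * (0x3f), so a monotonically increasing vq_next_used of 70 selects
 * ring slot 70 & 63 = 6; the 16-bit counter wraps harmlessly.
 */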
void
vq_relchain_publish(struct vqueue_info *vq)
{
	/*
	 * Ensure the used descriptor is visible before updating the index.
	 * This is necessary on ISAs with memory ordering less strict than x86
	 * (and even on x86 to act as a compiler barrier).
	 */
	atomic_thread_fence_rel();
	vq->vq_used->idx = vq->vq_next_used;
}
/*
 * Return specified request chain to the guest, setting its I/O length
 * to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	vq_relchain_prepare(vq, idx, iolen);
	vq_relchain_publish(vq);
}
/*
 * Driver has finished processing "available" chains and calling
 * vq_relchain on each one.  If driver used all the available
 * chains, used_all should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt.  Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state when he decided to finish interrupt
 * processing -- it's possible that descriptors became available after
 * that point.  (It's also typically a constant 1/True as well.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
	struct virtio_softc *vs;
	uint16_t event_idx, new_idx, old_idx;
	int intr;

	/*
	 * Interrupt generation: if we're using EVENT_IDX,
	 * interrupt if we've crossed the event threshold.
	 * Otherwise interrupt is generated if we added "used" entries,
	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
	 *
	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
	 * entire avail was processed, we need to interrupt always.
	 */
	vs = vq->vq_vs;
	old_idx = vq->vq_save_used;
	vq->vq_save_used = new_idx = vq->vq_used->idx;

	/*
	 * Use full memory barrier between "idx" store from preceding
	 * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
	 * "flags" field below.
	 */
	atomic_thread_fence_seq_cst();
	if (used_all_avail &&
	    (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
		intr = 1;
	else if (vs->vs_negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
		event_idx = VQ_USED_EVENT_IDX(vq);
		/*
		 * This calculation is per docs and the kernel
		 * (see src/sys/dev/virtio/virtio_ring.h).
		 */
		intr = (uint16_t)(new_idx - event_idx - 1) <
		    (uint16_t)(new_idx - old_idx);
	} else {
		intr = new_idx != old_idx &&
		    !(vq->vq_avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	if (intr)
		vq_interrupt(vs, vq);
}
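/*
 * Worked EVENT_IDX example: with old_idx = 5 and new_idx = 8, we
 * interrupt iff (uint16_t)(8 - event_idx - 1) < 3, i.e. iff the
 * guest's event_idx (5, 6 or 7) was crossed by this batch; an
 * event_idx of 9 yields 0xfffe there and suppresses the interrupt.
 */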
/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
	uint16_t	cr_offset;	/* register offset */
	uint8_t		cr_size;	/* size (bytes) */
	uint8_t		cr_ro;		/* true => reg is read only */
	const char	*cr_name;	/* name of reg */
} config_regs[] = {
	{ VIRTIO_PCI_HOST_FEATURES,	4, 1, "HOST_FEATURES" },
	{ VIRTIO_PCI_GUEST_FEATURES,	4, 0, "GUEST_FEATURES" },
	{ VIRTIO_PCI_QUEUE_PFN,		4, 0, "QUEUE_PFN" },
	{ VIRTIO_PCI_QUEUE_NUM,		2, 1, "QUEUE_NUM" },
	{ VIRTIO_PCI_QUEUE_SEL,		2, 0, "QUEUE_SEL" },
	{ VIRTIO_PCI_QUEUE_NOTIFY,	2, 0, "QUEUE_NOTIFY" },
	{ VIRTIO_PCI_STATUS,		1, 0, "STATUS" },
	{ VIRTIO_PCI_ISR,		1, 0, "ISR" },
	{ VIRTIO_MSI_CONFIG_VECTOR,	2, 0, "CONFIG_VECTOR" },
	{ VIRTIO_MSI_QUEUE_VECTOR,	2, 0, "QUEUE_VECTOR" },
};
static inline struct config_reg *
vi_find_cr(int offset) {
	u_int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->cr_offset == offset)
			return (cr);
		if (cr->cr_offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (NULL);
}
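/*
 * The binary search above relies on config_regs[] staying sorted by
 * cr_offset; with ten entries a lookup takes at most four probes.
 * New registers must be inserted in offset order.
 */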
/*
 * Handle pci config space reads.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
uint64_t
vi_pci_read(struct vmctx *ctx __unused,
    struct pci_devinst *pi, int baridx, uint64_t offset, int size)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	uint32_t value;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;
	value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 * If that fails, fall into general code.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		if (vc->vc_cfgread != NULL)
			error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff,
			    size, &value);
		else
			error = 0;
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size) {
		if (cr != NULL) {
			/* offset must be OK, so size must be bad */
			EPRINTLN(
			    "%s: read from %s: bad size %d",
			    name, cr->cr_name, size);
		} else {
			EPRINTLN(
			    "%s: read from bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		value = vc->vc_hv_caps;
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		value = vs->vs_negotiated_caps;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq < vc->vc_nvq)
			value = vs->vs_queues[vs->vs_curq].vq_pfn;
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		value = vs->vs_curq;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		value = 0;	/* XXX */
		break;
	case VIRTIO_PCI_STATUS:
		value = vs->vs_status;
		break;
	case VIRTIO_PCI_ISR:
		value = vs->vs_isr;
		vs->vs_isr = 0;	/* a read clears this flag */
		if (value)
			pci_lintr_deassert(pi);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		value = vs->vs_msix_cfg_idx;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
		    VIRTIO_MSI_NO_VECTOR;
		break;
	}

done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
	return (value);
}
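/*
 * Example access sequence (illustrative): to size queue 0 a guest
 * driver writes 0 to QUEUE_SEL and then reads QUEUE_NUM.  Selecting
 * a queue index >= vc_nvq is allowed and simply makes QUEUE_NUM read
 * back as 0, per the QUEUE_SEL note in vi_pci_write() below.
 */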
/*
 * Handle pci config space writes.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
void
vi_pci_write(struct vmctx *ctx __unused,
    struct pci_devinst *pi, int baridx, uint64_t offset, int size,
    uint64_t value)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct vqueue_info *vq;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		if (vc->vc_cfgwrite != NULL)
			error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff,
			    size, value);
		else
			error = 0;
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
		if (cr != NULL) {
			/* offset must be OK, wrong size and/or reg is R/O */
			if (cr->cr_size != size)
				EPRINTLN(
				    "%s: write to %s: bad size %d",
				    name, cr->cr_name, size);
			else
				EPRINTLN(
				    "%s: write to read-only reg %s",
				    name, cr->cr_name);
		} else {
			EPRINTLN(
			    "%s: write to bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
		if (vc->vc_apply_features)
			(*vc->vc_apply_features)(DEV_SOFTC(vs),
			    vs->vs_negotiated_caps);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vi_vq_init(vs, value);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		/*
		 * Note that the guest is allowed to select an
		 * invalid queue; we just need to return a QNUM
		 * of 0 while the bad queue is selected.
		 */
		vs->vs_curq = value;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		if (value >= (unsigned int)vc->vc_nvq) {
			EPRINTLN("%s: queue %d notify out of range",
			    name, (int)value);
			goto done;
		}
		vq = &vs->vs_queues[value];
		if (vq->vq_notify)
			(*vq->vq_notify)(DEV_SOFTC(vs), vq);
		else if (vc->vc_qnotify)
			(*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
		else
			EPRINTLN(
			    "%s: qnotify queue %d: missing vq/vc notify",
			    name, (int)value);
		break;
	case VIRTIO_PCI_STATUS:
		vs->vs_status = value;
		if (value == 0)
			(*vc->vc_reset)(DEV_SOFTC(vs));
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		vs->vs_msix_cfg_idx = value;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vq = &vs->vs_queues[vs->vs_curq];
		vq->vq_msix_idx = value;
		break;
	}
	goto done;

bad_qindex:
	EPRINTLN(
	    "%s: write config reg %s: curq %d >= max %d",
	    name, cr->cr_name, vs->vs_curq, vc->vc_nvq);

done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
}