2 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * $FreeBSD: src/sys/dev/virtio/pci/virtio_pci.c,v 1.3 2012/04/14 05:48:04 grehan Exp $
29 /* Driver for the VirtIO PCI interface. */
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/malloc.h>
37 #include <sys/serialize.h>
39 #include <bus/pci/pcivar.h>
40 #include <bus/pci/pcireg.h>
44 #include <dev/virtual/virtio/virtio/virtio.h>
45 #include <dev/virtual/virtio/virtio/virtqueue.h>
46 #include "virtio_pci.h"
47 #include "virtio_bus_if.h"
52 driver_intr_t
*handler
;
54 TAILQ_ENTRY(vqentry
) entries
;
57 TAILQ_HEAD(vqirq_list
, vqentry
);
61 struct resource
*vtpci_res
;
62 struct resource
*vtpci_msix_res
;
63 uint64_t vtpci_features
;
65 #define VIRTIO_PCI_FLAG_MSI 0x0001
66 #define VIRTIO_PCI_FLAG_MSIX 0x0010
68 device_t vtpci_child_dev
;
69 struct virtio_feature_desc
*vtpci_child_feat_desc
;
72 * Ideally, each virtqueue that the driver provides a callback for
73 * will receive its own MSIX vector. If there are not sufficient
74 * vectors available, we will then attempt to have all the VQs
75 * share one vector. Note that when using MSIX, the configuration
76 * changed notifications must be on their own vector.
78 * If MSIX is not available, we will attempt to have the whole
79 * device share one MSI vector, and then, finally, one legacy
83 struct vtpci_virtqueue
{
86 /* Index into vtpci_intr_res[] below. -1 if no IRQ assigned. */
88 } vtpci_vqx
[VIRTIO_MAX_VIRTQUEUES
];
91 * When using MSIX interrupts, the first element of vtpci_intr_res[]
92 * is always the configuration changed notifications. The remaining
93 * element(s) are used for the virtqueues.
95 * With MSI and legacy interrupts, only the first element of
96 * vtpci_intr_res[] is used.
100 struct vtpci_intr_resource
{
101 struct vtpci_softc
*ires_sc
;
102 struct resource
*irq
;
105 struct vqirq_list ls
;
106 } vtpci_intr_res
[1 + VIRTIO_MAX_VIRTQUEUES
];
108 int vtpci_config_irq
;
111 static int vtpci_probe(device_t
);
112 static int vtpci_attach(device_t
);
113 static int vtpci_detach(device_t
);
114 static int vtpci_suspend(device_t
);
115 static int vtpci_resume(device_t
);
116 static int vtpci_shutdown(device_t
);
117 static void vtpci_driver_added(device_t
, driver_t
*);
118 static void vtpci_child_detached(device_t
, device_t
);
119 static int vtpci_read_ivar(device_t
, device_t
, int, uintptr_t *);
120 static int vtpci_write_ivar(device_t
, device_t
, int, uintptr_t);
122 static uint64_t vtpci_negotiate_features(device_t
, uint64_t);
123 static int vtpci_with_feature(device_t
, uint64_t);
124 static int vtpci_intr_count(device_t dev
);
125 static int vtpci_intr_alloc(device_t dev
, int *cnt
, int use_config
,
127 static int vtpci_intr_release(device_t dev
);
128 static int vtpci_alloc_virtqueues(device_t
, int, struct vq_alloc_info
*);
129 static int vtpci_setup_intr(device_t
, uint irq
, lwkt_serialize_t
);
130 static int vtpci_teardown_intr(device_t
, uint irq
);
131 static int vtpci_bind_intr(device_t
, uint
, int, driver_intr_t
, void *);
132 static int vtpci_unbind_intr(device_t
, int);
133 static void vtpci_stop(device_t
);
134 static int vtpci_reinit(device_t
, uint64_t);
135 static void vtpci_reinit_complete(device_t
);
136 static void vtpci_notify_virtqueue(device_t
, uint16_t);
137 static uint8_t vtpci_get_status(device_t
);
138 static void vtpci_set_status(device_t
, uint8_t);
139 static void vtpci_read_dev_config(device_t
, bus_size_t
, void *, int);
140 static void vtpci_write_dev_config(device_t
, bus_size_t
, void *, int);
142 static void vtpci_describe_features(struct vtpci_softc
*, const char *,
144 static void vtpci_probe_and_attach_child(struct vtpci_softc
*);
146 static int vtpci_register_msix_vector(struct vtpci_softc
*, int, int);
148 static void vtpci_free_interrupts(struct vtpci_softc
*);
149 static void vtpci_free_virtqueues(struct vtpci_softc
*);
150 static void vtpci_release_child_resources(struct vtpci_softc
*);
151 static void vtpci_reset(struct vtpci_softc
*);
153 static void vtpci_legacy_intr(void *);
154 static void vtpci_msix_intr(void *);
157 * I/O port read/write wrappers.
/*
 * Shorthand accessors for the legacy VirtIO PCI I/O-port BAR (vtpci_res).
 * All device configuration and status traffic in this driver goes through
 * these bus-space wrappers.
 */
159 #define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o))
160 #define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o))
161 #define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o))
162 #define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v))
163 #define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v))
164 #define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v))
/*
 * Tunable: set hw.virtio.pci.disable_msix=1 to skip MSI-X and fall back
 * to MSI or a legacy line interrupt (checked in vtpci_intr_count/alloc).
 */
167 static int vtpci_disable_msix
= 0;
168 TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix
);
170 static device_method_t vtpci_methods
[] = {
171 /* Device interface. */
172 DEVMETHOD(device_probe
, vtpci_probe
),
173 DEVMETHOD(device_attach
, vtpci_attach
),
174 DEVMETHOD(device_detach
, vtpci_detach
),
175 DEVMETHOD(device_suspend
, vtpci_suspend
),
176 DEVMETHOD(device_resume
, vtpci_resume
),
177 DEVMETHOD(device_shutdown
, vtpci_shutdown
),
180 DEVMETHOD(bus_driver_added
, vtpci_driver_added
),
181 DEVMETHOD(bus_child_detached
, vtpci_child_detached
),
182 DEVMETHOD(bus_read_ivar
, vtpci_read_ivar
),
183 DEVMETHOD(bus_write_ivar
, vtpci_write_ivar
),
185 /* VirtIO bus interface. */
186 DEVMETHOD(virtio_bus_negotiate_features
, vtpci_negotiate_features
),
187 DEVMETHOD(virtio_bus_with_feature
, vtpci_with_feature
),
188 DEVMETHOD(virtio_bus_intr_count
, vtpci_intr_count
),
189 DEVMETHOD(virtio_bus_intr_alloc
, vtpci_intr_alloc
),
190 DEVMETHOD(virtio_bus_intr_release
, vtpci_intr_release
),
191 DEVMETHOD(virtio_bus_alloc_virtqueues
, vtpci_alloc_virtqueues
),
192 DEVMETHOD(virtio_bus_setup_intr
, vtpci_setup_intr
),
193 DEVMETHOD(virtio_bus_teardown_intr
, vtpci_teardown_intr
),
194 DEVMETHOD(virtio_bus_bind_intr
, vtpci_bind_intr
),
195 DEVMETHOD(virtio_bus_unbind_intr
, vtpci_unbind_intr
),
196 DEVMETHOD(virtio_bus_stop
, vtpci_stop
),
197 DEVMETHOD(virtio_bus_reinit
, vtpci_reinit
),
198 DEVMETHOD(virtio_bus_reinit_complete
, vtpci_reinit_complete
),
199 DEVMETHOD(virtio_bus_notify_vq
, vtpci_notify_virtqueue
),
200 DEVMETHOD(virtio_bus_read_device_config
, vtpci_read_dev_config
),
201 DEVMETHOD(virtio_bus_write_device_config
, vtpci_write_dev_config
),
206 static driver_t vtpci_driver
= {
209 sizeof(struct vtpci_softc
)
212 devclass_t vtpci_devclass
;
214 DRIVER_MODULE(virtio_pci
, pci
, vtpci_driver
, vtpci_devclass
, NULL
, NULL
);
215 MODULE_VERSION(virtio_pci
, 1);
216 MODULE_DEPEND(virtio_pci
, pci
, 1, 1, 1);
217 MODULE_DEPEND(virtio_pci
, virtio
, 1, 1, 1);
/*
 * Probe: match transitional VirtIO PCI devices by vendor ID, device ID
 * range, and ABI revision, then set a human-readable description.
 * NOTE(review): the rejection returns (presumably ENXIO) and the local
 * declarations for 'name'/'desc' are elided in this extract -- confirm
 * against the upstream file.
 */
220 vtpci_probe(device_t dev
)
/* Must be the VirtIO PCI vendor. */
225 if (pci_get_vendor(dev
) != VIRTIO_PCI_VENDORID
)
/* Device ID must fall inside the VirtIO transitional range. */
228 if (pci_get_device(dev
) < VIRTIO_PCI_DEVICEID_MIN
||
229 pci_get_device(dev
) > VIRTIO_PCI_DEVICEID_MAX
)
/* The PCI revision ID encodes the VirtIO ABI version. */
232 if (pci_get_revid(dev
) != VIRTIO_PCI_ABI_VERSION
)
/* The PCI subdevice ID identifies the VirtIO device type (net, blk, ...). */
235 name
= virtio_device_name(pci_get_subdevice(dev
));
239 ksnprintf(desc
, sizeof(desc
), "VirtIO PCI %s adapter", name
);
240 device_set_desc_copy(dev
, desc
);
242 return (BUS_PROBE_DEFAULT
);
246 vtpci_attach(device_t dev
)
248 struct vtpci_softc
*sc
;
252 sc
= device_get_softc(dev
);
254 sc
->vtpci_config_irq
= -1;
256 pci_enable_busmaster(dev
);
259 sc
->vtpci_res
= bus_alloc_resource_any(dev
, SYS_RES_IOPORT
, &rid
,
261 if (sc
->vtpci_res
== NULL
) {
262 device_printf(dev
, "cannot map I/O space\n");
266 if (pci_find_extcap(dev
, PCIY_MSIX
, &msix_cap
) == 0) {
268 val
= pci_read_config(dev
, msix_cap
+ PCIR_MSIX_TABLE
, 4);
269 rid
= PCIR_BAR(val
& PCIM_MSIX_BIR_MASK
);
270 sc
->vtpci_msix_res
= bus_alloc_resource_any(dev
,
271 SYS_RES_MEMORY
, &rid
, RF_ACTIVE
);
276 /* Tell the host we've noticed this device. */
277 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_ACK
);
279 if ((child
= device_add_child(dev
, NULL
, -1)) == NULL
) {
280 device_printf(dev
, "cannot create child device\n");
281 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_FAILED
);
286 sc
->vtpci_child_dev
= child
;
287 vtpci_probe_and_attach_child(sc
);
293 vtpci_detach(device_t dev
)
295 struct vtpci_softc
*sc
;
299 sc
= device_get_softc(dev
);
301 if ((child
= sc
->vtpci_child_dev
) != NULL
) {
302 error
= device_delete_child(dev
, child
);
305 sc
->vtpci_child_dev
= NULL
;
310 if (sc
->vtpci_msix_res
!= NULL
) {
311 bus_release_resource(dev
, SYS_RES_MEMORY
,
312 rman_get_rid(sc
->vtpci_msix_res
), sc
->vtpci_msix_res
);
313 sc
->vtpci_msix_res
= NULL
;
316 if (sc
->vtpci_res
!= NULL
) {
317 bus_release_resource(dev
, SYS_RES_IOPORT
, PCIR_BAR(0),
319 sc
->vtpci_res
= NULL
;
326 vtpci_suspend(device_t dev
)
329 return (bus_generic_suspend(dev
));
333 vtpci_resume(device_t dev
)
336 return (bus_generic_resume(dev
));
/*
 * System shutdown hook: run the generic bus shutdown first, then force
 * the host device to stop.  NOTE(review): the forced-stop statement and
 * the return are elided in this extract -- confirm against upstream.
 */
340 vtpci_shutdown(device_t dev
)
/* Return value deliberately ignored; we shut the device down regardless. */
343 (void) bus_generic_shutdown(dev);
344 /* Forcibly stop the host device. */
351 vtpci_driver_added(device_t dev
, driver_t
*driver
)
353 struct vtpci_softc
*sc
;
355 sc
= device_get_softc(dev
);
357 vtpci_probe_and_attach_child(sc
);
/*
 * Child device detached: release resources handed to the child
 * (interrupts and virtqueues).  NOTE(review): one statement between the
 * softc lookup and the release call is elided in this extract
 * (presumably a device reset) -- confirm against upstream.
 */
361 vtpci_child_detached(device_t dev
, device_t child
)
363 struct vtpci_softc
*sc
;
365 sc
= device_get_softc(dev
)
;
368 vtpci_release_child_resources(sc
);
372 vtpci_read_ivar(device_t dev
, device_t child
, int index
, uintptr_t *result
)
374 struct vtpci_softc
*sc
;
376 sc
= device_get_softc(dev
);
378 if (sc
->vtpci_child_dev
!= child
)
382 case VIRTIO_IVAR_DEVTYPE
:
383 *result
= pci_get_subdevice(dev
);
393 vtpci_write_ivar(device_t dev
, device_t child
, int index
, uintptr_t value
)
395 struct vtpci_softc
*sc
;
397 sc
= device_get_softc(dev
);
399 if (sc
->vtpci_child_dev
!= child
)
403 case VIRTIO_IVAR_FEATURE_DESC
:
404 sc
->vtpci_child_feat_desc
= (void *) value
;
414 vtpci_negotiate_features(device_t dev
, uint64_t child_features
)
416 struct vtpci_softc
*sc
;
417 uint64_t host_features
, features
;
419 sc
= device_get_softc(dev
);
421 host_features
= vtpci_read_config_4(sc
, VIRTIO_PCI_HOST_FEATURES
);
422 vtpci_describe_features(sc
, "host", host_features
);
425 * Limit negotiated features to what the driver, virtqueue, and
428 features
= host_features
& child_features
;
429 features
= virtqueue_filter_features(features
);
430 sc
->vtpci_features
= features
;
432 vtpci_describe_features(sc
, "negotiated", features
);
433 vtpci_write_config_4(sc
, VIRTIO_PCI_GUEST_FEATURES
, features
);
439 vtpci_with_feature(device_t dev
, uint64_t feature
)
441 struct vtpci_softc
*sc
;
443 sc
= device_get_softc(dev
);
445 return ((sc
->vtpci_features
& feature
) != 0);
/*
 * Report how many interrupt vectors are available to the child driver:
 * the MSI-X vector count when MSI-X is usable and not disabled by the
 * hw.virtio.pci.disable_msix tunable.  NOTE(review): the non-MSI-X
 * return (presumably a single MSI/legacy vector) is elided in this
 * extract -- confirm against upstream.
 */
449 vtpci_intr_count(device_t dev
)
451 struct vtpci_softc
*sc
= device_get_softc(dev
);
453 if (vtpci_disable_msix
!= 0 || sc
->vtpci_msix_res
== NULL
)
456 return pci_msix_count(dev
);
459 /* Will never return 0, with *cnt <= 0. */
461 vtpci_intr_alloc(device_t dev
, int *cnt
, int use_config
, int *cpus
)
463 struct vtpci_softc
*sc
= device_get_softc(dev
);
466 if (sc
->vtpci_nintr_res
> 0)
472 if (vtpci_disable_msix
== 0 && sc
->vtpci_msix_res
!= NULL
) {
473 int nmsix
= pci_msix_count(dev
);
478 if ((*cnt
> 1 || use_config
== 0) &&
479 vtpci_disable_msix
== 0 && sc
->vtpci_msix_res
!= NULL
) {
480 if (pci_setup_msix(dev
) != 0) {
481 device_printf(dev
, "pci_setup_msix failed\n");
482 /* Just fallthrough to legacy IRQ code instead. */
484 for (i
= 0; i
< *cnt
; i
++) {
487 if (cpus
!= NULL
&& cpus
[i
] >= 0 &&
491 cpu
= device_get_unit(dev
) + i
;
494 if (pci_alloc_msix_vector(dev
, i
, &rid
, cpu
)
496 if (i
> 1 || (i
== 1 && !use_config
)) {
498 /* Got some MSI-X vectors. */
499 sc
->vtpci_irq_flags
= RF_ACTIVE
;
501 VIRTIO_PCI_FLAG_MSIX
;
505 * Allocate the legacy IRQ instead.
508 pci_release_msix_vector(dev
, 0);
510 pci_teardown_msix(dev
);
513 sc
->vtpci_intr_res
[i
].rid
= rid
;
515 /* Got all the MSI-X vectors we wanted. */
516 sc
->vtpci_irq_flags
= RF_ACTIVE
;
517 sc
->vtpci_flags
|= VIRTIO_PCI_FLAG_MSIX
;
518 /* Successfully allocated all MSI-X vectors */
523 /* Legacy IRQ code: */
526 * Use MSI interrupts if available. Otherwise, we fallback
527 * to legacy interrupts.
529 sc
->vtpci_intr_res
[0].rid
= 0;
530 if (pci_alloc_1intr(sc
->vtpci_dev
, 1,
531 &sc
->vtpci_intr_res
[0].rid
,
532 &sc
->vtpci_irq_flags
) == PCI_INTR_TYPE_MSI
) {
533 sc
->vtpci_flags
|= VIRTIO_PCI_FLAG_MSI
;
537 KKASSERT(!((sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSI
) != 0 &&
538 (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) != 0));
540 sc
->vtpci_nintr_res
= *cnt
;
541 for (i
= 0; i
< sc
->vtpci_nintr_res
; i
++) {
542 struct resource
*irq
;
544 TAILQ_INIT(&sc
->vtpci_intr_res
[i
].ls
);
545 sc
->vtpci_intr_res
[i
].ires_sc
= sc
;
546 irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
,
547 &sc
->vtpci_intr_res
[i
].rid
, sc
->vtpci_irq_flags
);
551 cpus
[i
] = rman_get_cpuid(irq
);
553 sc
->vtpci_intr_res
[i
].irq
= irq
;
556 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
557 device_printf(dev
, "using %d MSI-X vectors\n", *cnt
);
558 pci_enable_msix(dev
);
565 vtpci_intr_release(device_t dev
)
567 struct vtpci_softc
*sc
= device_get_softc(dev
);
568 struct vtpci_intr_resource
*ires
;
571 if (sc
->vtpci_nintr_res
== 0)
574 /* XXX Make sure none of the interrupts is used at the moment. */
576 for (i
= 0; i
< sc
->vtpci_nintr_res
; i
++) {
577 ires
= &sc
->vtpci_intr_res
[i
];
579 KKASSERT(TAILQ_EMPTY(&ires
->ls
));
580 if (ires
->irq
!= NULL
) {
581 bus_release_resource(dev
, SYS_RES_IRQ
, ires
->rid
,
585 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
)
586 pci_release_msix_vector(dev
, ires
->rid
);
589 sc
->vtpci_nintr_res
= 0;
590 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSI
) {
591 pci_release_msi(dev
);
592 sc
->vtpci_flags
&= ~VIRTIO_PCI_FLAG_MSI
;
594 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
595 pci_teardown_msix(dev
);
596 sc
->vtpci_flags
&= ~VIRTIO_PCI_FLAG_MSIX
;
602 vtpci_alloc_virtqueues(device_t dev
, int nvqs
, struct vq_alloc_info
*vq_info
)
604 struct vtpci_softc
*sc
;
605 struct vtpci_virtqueue
*vqx
;
606 struct vq_alloc_info
*info
;
610 sc
= device_get_softc(dev
);
612 if (sc
->vtpci_nvqs
!= 0 || nvqs
<= 0 ||
613 nvqs
> VIRTIO_MAX_VIRTQUEUES
)
616 for (queue
= 0; queue
< nvqs
; queue
++) {
617 vqx
= &sc
->vtpci_vqx
[queue
];
618 info
= &vq_info
[queue
];
621 vtpci_write_config_2(sc
, VIRTIO_PCI_QUEUE_SEL
, queue
);
623 vq_size
= vtpci_read_config_2(sc
, VIRTIO_PCI_QUEUE_NUM
);
624 error
= virtqueue_alloc(dev
, queue
, vq_size
,
625 VIRTIO_PCI_VRING_ALIGN
, 0xFFFFFFFFUL
, info
, &vqx
->vq
);
629 vtpci_write_config_4(sc
, VIRTIO_PCI_QUEUE_PFN
,
630 virtqueue_paddr(vqx
->vq
) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT
);
632 *info
->vqai_vq
= vqx
->vq
;
639 /* XXX Add argument to specify the callback function here. */
641 vtpci_setup_intr(device_t dev
, uint irq
, lwkt_serialize_t slz
)
643 struct vtpci_softc
*sc
;
644 struct vtpci_intr_resource
*ires
;
647 sc
= device_get_softc(dev
);
650 if ((int)irq
>= sc
->vtpci_nintr_res
)
652 ires
= &sc
->vtpci_intr_res
[irq
];
654 if ((sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) == 0) {
655 error
= bus_setup_intr(dev
, ires
->irq
, flags
,
657 ires
, &ires
->intrhand
, slz
);
659 error
= bus_setup_intr(dev
, ires
->irq
, flags
,
661 ires
, &ires
->intrhand
, slz
);
667 vtpci_teardown_intr(device_t dev
, uint irq
)
669 struct vtpci_softc
*sc
= device_get_softc(dev
);
670 struct vtpci_intr_resource
*ires
;
672 if ((int)irq
>= sc
->vtpci_nintr_res
)
675 ires
= &sc
->vtpci_intr_res
[irq
];
677 if (ires
->intrhand
== NULL
)
680 bus_teardown_intr(dev
, ires
->irq
, ires
->intrhand
);
681 ires
->intrhand
= NULL
;
686 vtpci_add_irqentry(struct vtpci_intr_resource
*intr_res
, int what
,
687 driver_intr_t handler
, void *arg
)
691 TAILQ_FOREACH(e
, &intr_res
->ls
, entries
) {
695 e
= kmalloc(sizeof(*e
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
700 e
->vq
= intr_res
->ires_sc
->vtpci_vqx
[e
->what
].vq
;
702 e
->handler
= handler
;
704 TAILQ_INSERT_TAIL(&intr_res
->ls
, e
, entries
);
/*
 * Remove the irq entry bound to virtqueue index 'what' (-1 for the
 * config-change entry) from this interrupt resource's handler list.
 * NOTE(review): the match test inside the loop and the kfree of the
 * removed entry are elided in this extract -- confirm against upstream.
 */
708 vtpci_del_irqentry(struct vtpci_intr_resource
*intr_res
, int what
)
712 TAILQ_FOREACH(e
, &intr_res
->ls
, entries
) {
717 TAILQ_REMOVE(&intr_res
->ls
, e
, entries
);
723 * Config intr can be bound after intr_alloc, virtqueue intrs can be bound
724 * after intr_alloc and alloc_virtqueues.
727 vtpci_bind_intr(device_t dev
, uint irq
, int what
,
728 driver_intr_t handler
, void *arg
)
730 struct vtpci_softc
*sc
= device_get_softc(dev
);
731 struct vtpci_virtqueue
*vqx
;
734 if (irq
>= sc
->vtpci_nintr_res
)
738 if (sc
->vtpci_config_irq
!= -1)
741 sc
->vtpci_config_irq
= irq
;
742 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
743 error
= vtpci_register_msix_vector(sc
,
744 VIRTIO_MSI_CONFIG_VECTOR
, irq
);
751 if (sc
->vtpci_nvqs
<= what
|| what
< 0)
754 vqx
= &sc
->vtpci_vqx
[what
];
755 if (vqx
->ires_idx
!= -1)
759 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
760 vtpci_write_config_2(sc
, VIRTIO_PCI_QUEUE_SEL
, what
);
761 error
= vtpci_register_msix_vector(sc
, VIRTIO_MSI_QUEUE_VECTOR
,
767 vtpci_add_irqentry(&sc
->vtpci_intr_res
[irq
], what
, handler
, arg
);
772 vtpci_unbind_intr(device_t dev
, int what
)
774 struct vtpci_softc
*sc
= device_get_softc(dev
);
775 struct vtpci_virtqueue
*vqx
;
779 if (sc
->vtpci_config_irq
== -1)
782 irq
= sc
->vtpci_config_irq
;
783 sc
->vtpci_config_irq
= -1;
784 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
785 vtpci_register_msix_vector(sc
,
786 VIRTIO_MSI_CONFIG_VECTOR
, -1);
791 if (sc
->vtpci_nvqs
<= what
|| what
< 0)
794 vqx
= &sc
->vtpci_vqx
[what
];
795 if (vqx
->ires_idx
== -1)
800 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
801 vtpci_write_config_2(sc
, VIRTIO_PCI_QUEUE_SEL
, what
);
802 vtpci_register_msix_vector(sc
, VIRTIO_MSI_QUEUE_VECTOR
, -1);
805 KKASSERT(irq
>= 0 && irq
< sc
->vtpci_nintr_res
);
806 vtpci_del_irqentry(&sc
->vtpci_intr_res
[irq
], what
);
811 vtpci_stop(device_t dev
)
813 vtpci_reset(device_get_softc(dev
));
817 vtpci_reinit(device_t dev
, uint64_t features
)
819 struct vtpci_softc
*sc
;
820 struct vtpci_virtqueue
*vqx
;
821 struct virtqueue
*vq
;
825 sc
= device_get_softc(dev
);
828 * Redrive the device initialization. This is a bit of an abuse
829 * of the specification, but both VirtualBox and QEMU/KVM seem
830 * to play nice. We do not allow the host device to change from
831 * what was originally negotiated beyond what the guest driver
832 * changed (MSIX state should not change, number of virtqueues
833 * and their size remain the same, etc).
836 if (vtpci_get_status(dev
) != VIRTIO_CONFIG_STATUS_RESET
)
840 * Quickly drive the status through ACK and DRIVER. The device
841 * does not become usable again until vtpci_reinit_complete().
843 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_ACK
);
844 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_DRIVER
);
846 vtpci_negotiate_features(dev
, features
);
848 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
849 pci_enable_msix(dev
);
850 error
= vtpci_register_msix_vector(sc
,
851 VIRTIO_MSI_CONFIG_VECTOR
, 0);
856 for (queue
= 0; queue
< sc
->vtpci_nvqs
; queue
++) {
857 vqx
= &sc
->vtpci_vqx
[queue
];
860 KASSERT(vq
!= NULL
, ("vq %d not allocated", queue
));
861 vtpci_write_config_2(sc
, VIRTIO_PCI_QUEUE_SEL
, queue
);
863 vq_size
= vtpci_read_config_2(sc
, VIRTIO_PCI_QUEUE_NUM
);
864 error
= virtqueue_reinit(vq
, vq_size
);
868 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
869 error
= vtpci_register_msix_vector(sc
,
870 VIRTIO_MSI_QUEUE_VECTOR
, vqx
->ires_idx
);
875 vtpci_write_config_4(sc
, VIRTIO_PCI_QUEUE_PFN
,
876 virtqueue_paddr(vqx
->vq
) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT
);
883 vtpci_reinit_complete(device_t dev
)
886 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_DRIVER_OK
);
890 vtpci_notify_virtqueue(device_t dev
, uint16_t queue
)
892 struct vtpci_softc
*sc
;
894 sc
= device_get_softc(dev
);
896 vtpci_write_config_2(sc
, VIRTIO_PCI_QUEUE_NOTIFY
, queue
);
900 vtpci_get_status(device_t dev
)
902 struct vtpci_softc
*sc
;
904 sc
= device_get_softc(dev
);
906 return (vtpci_read_config_1(sc
, VIRTIO_PCI_STATUS
));
910 vtpci_set_status(device_t dev
, uint8_t status
)
912 struct vtpci_softc
*sc
;
914 sc
= device_get_softc(dev
);
916 if (status
!= VIRTIO_CONFIG_STATUS_RESET
)
917 status
|= vtpci_get_status(dev
);
919 vtpci_write_config_1(sc
, VIRTIO_PCI_STATUS
, status
);
923 vtpci_read_dev_config(device_t dev
, bus_size_t offset
,
924 void *dst
, int length
)
926 struct vtpci_softc
*sc
;
931 sc
= device_get_softc(dev
);
932 off
= VIRTIO_PCI_CONFIG(sc
) + offset
;
934 for (d
= dst
; length
> 0; d
+= size
, off
+= size
, length
-= size
) {
937 *(uint32_t *)d
= vtpci_read_config_4(sc
, off
);
938 } else if (length
>= 2) {
940 *(uint16_t *)d
= vtpci_read_config_2(sc
, off
);
943 *d
= vtpci_read_config_1(sc
, off
);
949 vtpci_write_dev_config(device_t dev
, bus_size_t offset
,
950 void *src
, int length
)
952 struct vtpci_softc
*sc
;
957 sc
= device_get_softc(dev
);
958 off
= VIRTIO_PCI_CONFIG(sc
) + offset
;
960 for (s
= src
; length
> 0; s
+= size
, off
+= size
, length
-= size
) {
963 vtpci_write_config_4(sc
, off
, *(uint32_t *)s
);
964 } else if (length
>= 2) {
966 vtpci_write_config_2(sc
, off
, *(uint16_t *)s
);
969 vtpci_write_config_1(sc
, off
, *s
);
/*
 * Pretty-print a feature bitmask (prefixed by 'msg', e.g. "host" or
 * "negotiated") using the child driver's feature descriptions.  Skipped
 * once the child is attached unless booting verbosely.  NOTE(review):
 * the 'features' parameter line, local declarations, and the early
 * return are elided in this extract -- confirm against upstream.
 */
975 vtpci_describe_features(struct vtpci_softc
*sc
, const char *msg
,
981 child
= sc
->vtpci_child_dev
;
983 if (device_is_attached(child
) && bootverbose
== 0)
986 virtio_describe(dev
, msg
, features
, sc
->vtpci_child_feat_desc
);
990 vtpci_probe_and_attach_child(struct vtpci_softc
*sc
)
996 child
= sc
->vtpci_child_dev
;
1001 if (device_get_state(child
) != DS_NOTPRESENT
)
1004 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_DRIVER
);
1005 error
= device_probe_and_attach(child
);
1006 if (error
!= 0 || device_get_state(child
) == DS_NOTPRESENT
) {
1007 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_FAILED
);
1009 vtpci_release_child_resources(sc
);
1011 /* Reset status for future attempt. */
1012 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_ACK
);
1014 vtpci_set_status(dev
, VIRTIO_CONFIG_STATUS_DRIVER_OK
);
1018 vtpci_register_msix_vector(struct vtpci_softc
*sc
, int offset
, int res_idx
)
1023 dev
= sc
->vtpci_dev
;
1025 if (offset
!= VIRTIO_MSI_CONFIG_VECTOR
&&
1026 offset
!= VIRTIO_MSI_QUEUE_VECTOR
)
1029 if (res_idx
!= -1) {
1030 /* Map from rid to host vector. */
1033 vector
= VIRTIO_MSI_NO_VECTOR
;
1036 vtpci_write_config_2(sc
, offset
, vector
);
1038 if (vtpci_read_config_2(sc
, offset
) != vector
) {
1039 device_printf(dev
, "insufficient host resources for "
1040 "MSIX interrupts\n");
1048 vtpci_free_interrupts(struct vtpci_softc
*sc
)
1050 device_t dev
= sc
->vtpci_dev
;
1051 struct vtpci_intr_resource
*ires
;
1054 for (i
= 0; i
< sc
->vtpci_nintr_res
; i
++) {
1055 ires
= &sc
->vtpci_intr_res
[i
];
1057 if (ires
->intrhand
!= NULL
) {
1058 bus_teardown_intr(dev
, ires
->irq
, ires
->intrhand
);
1059 ires
->intrhand
= NULL
;
1061 if (ires
->irq
!= NULL
) {
1062 bus_release_resource(dev
, SYS_RES_IRQ
, ires
->rid
,
1068 vtpci_unbind_intr(sc
->vtpci_dev
, -1);
1069 for (i
= 0; i
< sc
->vtpci_nvqs
; i
++)
1070 vtpci_unbind_intr(sc
->vtpci_dev
, i
);
1072 for (i
= 0; i
< sc
->vtpci_nintr_res
; i
++) {
1073 ires
= &sc
->vtpci_intr_res
[i
];
1075 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
)
1076 pci_release_msix_vector(dev
, ires
->rid
);
1079 sc
->vtpci_nintr_res
= 0;
1080 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSI
) {
1081 pci_release_msi(dev
);
1082 sc
->vtpci_flags
&= ~VIRTIO_PCI_FLAG_MSI
;
1084 if (sc
->vtpci_flags
& VIRTIO_PCI_FLAG_MSIX
) {
1085 pci_disable_msix(dev
);
1086 pci_teardown_msix(dev
);
1087 sc
->vtpci_flags
&= ~VIRTIO_PCI_FLAG_MSIX
;
1093 vtpci_free_virtqueues(struct vtpci_softc
*sc
)
1095 struct vtpci_virtqueue
*vqx
;
1100 for (i
= 0; i
< VIRTIO_MAX_VIRTQUEUES
; i
++) {
1101 vqx
= &sc
->vtpci_vqx
[i
];
1103 if (vqx
->vq
!= NULL
) {
1104 virtqueue_free(vqx
->vq
);
/*
 * Release everything handed out to the child driver: interrupt
 * resources first, then the allocated virtqueues.
 */
static void
vtpci_release_child_resources(struct vtpci_softc *sc)
{
	vtpci_free_interrupts(sc);
	vtpci_free_virtqueues(sc);
}
1118 vtpci_reset(struct vtpci_softc
*sc
)
1122 * Setting the status to RESET sets the host device to
1123 * the original, uninitialized state.
1125 vtpci_set_status(sc
->vtpci_dev
, VIRTIO_CONFIG_STATUS_RESET
);
1129 vtpci_legacy_intr(void *arg
)
1131 struct vtpci_intr_resource
*ires
;
1132 struct vtpci_softc
*sc
;
1139 /* Reading the ISR also clears it. */
1140 isr
= vtpci_read_config_1(sc
, VIRTIO_PCI_ISR
);
1142 TAILQ_FOREACH(e
, &ires
->ls
, entries
) {
1144 * The lwkt_serialize_handler_call API doesn't seem to fit
1145 * properly here. Instead move the virtqueue pending check
1146 * into the driver, who can then properly implement masking
1147 * of the handler itself.
1149 if (e
->what
== -1) {
1150 if (isr
& VIRTIO_PCI_ISR_CONFIG
)
1152 } else if (isr
& VIRTIO_PCI_ISR_INTR
) {
1159 vtpci_msix_intr(void *arg
)
1161 struct vtpci_intr_resource
*ires
;
1162 struct vtpci_softc
*sc
;
1167 TAILQ_FOREACH(e
, &ires
->ls
, entries
) {
1169 * The lwkt_serialize_handler_call API doesn't seem to fit
1170 * properly here. Instead move the virtqueue pending check
1171 * into the driver, who can then properly implement masking
1172 * of the handler itself.