/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */

/* Based on the NetBSD virtio driver by Minoura Makoto. */
/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/avintr.h>
#include <sys/spl.h>
#include <sys/promif.h>
#include <sys/list.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>

#include "virtiovar.h"
#include "virtioreg.h"

#define	NDEVNAMES	(sizeof (virtio_device_name) / sizeof (char *))
#define	MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */
#define	VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1)) &	\
	    ~(VIRTIO_PAGE_SIZE-1))

void
virtio_set_status(struct virtio_softc *sc, unsigned int status)
{
	int old = 0;

	if (status != 0) {
		old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_DEVICE_STATUS));
	}

	ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_DEVICE_STATUS), status | old);
}

/*
 * Negotiate features, save the result in sc->sc_features
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t host_features;
	uint32_t features;

	host_features = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

	dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
	    host_features, guest_features);

	features = host_features & guest_features;
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
	    features);

	sc->sc_features = features;

	return (host_features);
}
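
/*
 * Usage sketch (illustrative, not part of this module): a leaf driver
 * typically negotiates features once during attach and then checks the
 * accepted set with virtio_has_feature() before relying on optional
 * functionality.  The softc expression below stands in for whatever the
 * caller actually uses:
 *
 *	(void) virtio_negotiate_features(&sc->sc_virtio,
 *	    VIRTIO_F_RING_INDIRECT_DESC);
 *	if (virtio_has_feature(&sc->sc_virtio, VIRTIO_F_RING_INDIRECT_DESC))
 *		... indirect descriptors may be used ...
 */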

size_t
virtio_show_features(uint32_t features, char *buf, size_t len)
{
	char *orig_buf = buf;
	char *bufend = buf + len;

	/* LINTED E_PTRDIFF_OVERFLOW */
	buf += snprintf(buf, bufend - buf, "Generic ( ");
	if (features & VIRTIO_F_RING_INDIRECT_DESC)
		/* LINTED E_PTRDIFF_OVERFLOW */
		buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");

	/* LINTED E_PTRDIFF_OVERFLOW */
	buf += snprintf(buf, bufend - buf, ") ");

	/* LINTED E_PTRDIFF_OVERFLOW */
	return (buf - orig_buf);
}

boolean_t
virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
{
	return (sc->sc_features & feature);
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return ddi_get8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
{
	uint64_t r;

	ASSERT(sc->sc_config_offset);
	r = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
	    index + sizeof (uint32_t)));

	r <<= 32;

	r += ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
	return (r);
}

void
virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
    uint8_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
    uint16_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
    uint32_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
    uint64_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
	    value & 0xFFFFFFFF);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
	    index + sizeof (uint32_t)), value >> 32);
}
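
/*
 * Example (illustrative only): a device-type driver reads fields out of
 * its device-specific config space through these accessors.  The offset
 * 0 below is just a placeholder, not a real register layout:
 *
 *	uint64_t val = virtio_read_device_config_8(sc, 0);
 */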

/*
 * Start/stop vq interrupt. No guarantee.
 */
void
virtio_stop_vq_intr(struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

void
virtio_start_vq_intr(struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}

static ddi_dma_attr_t virtio_vq_dma_attr = {
	DMA_ATTR_V0,		/* Version number */
	0,			/* low address */
	0x00000FFFFFFFFFFF,	/* high address. Has to fit into 32 bits */
				/* after page-shifting */
	0xFFFFFFFF,		/* counter register max */
	VIRTIO_PAGE_SIZE,	/* page alignment required */
	0x3F,			/* burst sizes: 1 - 32 */
	0x1,			/* minimum transfer size */
	0xFFFFFFFF,		/* max transfer size */
	0xFFFFFFFF,		/* address register max */
	1,			/* no scatter-gather */
	1,			/* device operates on bytes */
	0,			/* attr flag: set to 0 */
};

static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
	DMA_ATTR_V0,		/* Version number */
	0,			/* low address */
	0xFFFFFFFFFFFFFFFF,	/* high address */
	0xFFFFFFFF,		/* counter register max */
	1,			/* No specific alignment */
	0x3F,			/* burst sizes: 1 - 32 */
	0x1,			/* minimum transfer size */
	0xFFFFFFFF,		/* max transfer size */
	0xFFFFFFFF,		/* address register max */
	1,			/* no scatter-gather */
	1,			/* device operates on bytes */
	0,			/* attr flag: set to 0 */
};

/* Same for direct and indirect descriptors. */
static ddi_device_acc_attr_t virtio_vq_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC,
	DDI_DEFAULT_ACC
};

static void
virtio_free_indirect(struct vq_entry *entry)
{
	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);

	entry->qe_indirect_descs = NULL;
}

static int
virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
{
	int allocsize, num;
	size_t len;
	unsigned int ncookies;
	int ret;

	num = entry->qe_queue->vq_indirect_num;
	ASSERT(num > 1);

	allocsize = sizeof (struct vring_desc) * num;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&entry->qe_indirect_descs, &len,
	    &entry->qe_indirect_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc;
	}

	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);

	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
	    (caddr_t)entry->qe_indirect_descs, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &entry->qe_indirect_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);

	return (0);

out_bind:
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
out_alloc:
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
out_alloc_handle:

	return (ret);
}

/*
 * Initialize the vq structure.
 */
static int
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	int ret;
	uint16_t i;
	int vq_size = vq->vq_num;
	int indirect_num = vq->vq_indirect_num;

	/* free slot management */
	list_create(&vq->vq_freelist, sizeof (struct vq_entry),
	    offsetof(struct vq_entry, qe_list));

	for (i = 0; i < vq_size; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		list_insert_tail(&vq->vq_freelist, entry);
		entry->qe_index = i;
		entry->qe_desc = &vq->vq_descs[i];
		entry->qe_queue = vq;

		if (indirect_num) {
			ret = virtio_alloc_indirect(sc, entry);
			if (ret)
				goto out_indirect;
		}
	}

	mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_prio));
	mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_prio));
	mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_prio));

	return (0);

out_indirect:
	for (i = 0; i < vq_size; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	return (ret);
}

/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue does not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
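
/*
 * Allocation sketch (illustrative, error handling omitted): a driver
 * normally allocates its queues after registering interrupts, since
 * virtio_init_vq() needs sc_intr_prio for the queue mutexes.  The queue
 * name and sizes below are made up:
 *
 *	vq = virtio_alloc_vq(sc, 0, 0, 128, "rx");
 *	if (vq == NULL)
 *		... fail attach ...
 *	...
 *	virtio_free_vq(vq);
 */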

void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}

/*
 * Free descriptor management.
 */
struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (list_is_empty(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return (NULL);
	}
	qe = list_remove_head(&vq->vq_freelist);

	ASSERT(vq->vq_used_entries >= 0);
	vq->vq_used_entries++;

	mutex_exit(&vq->vq_freelist_lock);

	qe->qe_next = NULL;
	qe->qe_indirect_next = 0;
	(void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));

	return (qe);
}

void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);

	list_insert_head(&vq->vq_freelist, qe);
	vq->vq_used_entries--;
	ASSERT(vq->vq_used_entries >= 0);
	mutex_exit(&vq->vq_freelist_lock);
}

/*
 * We (intentionally) don't have a global vq mutex, so you are
 * responsible for external locking to avoid allocating/freeing any
 * entries before using the returned value. Have fun.
 */
uint_t
vq_num_used(struct virtqueue *vq)
{
	/* vq->vq_freelist_lock would not help here. */
	return (vq->vq_used_entries);
}

static inline void
virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	desc->addr = paddr;
	desc->len = len;
	desc->next = 0;
	desc->flags = 0;

	/* 'write' - from the driver's point of view */
	if (!write)
		desc->flags = VRING_DESC_F_WRITE;
}

void
virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
}

unsigned int
virtio_ve_indirect_available(struct vq_entry *qe)
{
	return (qe->qe_queue->vq_indirect_num - qe->qe_indirect_next);
}

void
virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	struct vring_desc *indirect_desc;

	ASSERT(qe->qe_queue->vq_indirect_num);
	ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);

	indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
	virtio_ve_set_desc(indirect_desc, paddr, len, write);
	qe->qe_indirect_next++;
}

void
virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
    ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
{
	int i;

	for (i = 0; i < ncookies; i++) {
		virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
		    dma_cookie.dmac_size, write);
		ddi_dma_nextcookie(dma_handle, &dma_cookie);
	}
}

void
virtio_sync_vq(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;

	/* Make sure the avail ring update hit the buffer */
	membar_producer();

	vq->vq_avail->idx = vq->vq_avail_idx;

	/* Make sure the avail idx update hits the buffer */
	membar_producer();

	/* Make sure we see the flags update */
	membar_consumer();

	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
		ddi_put16(vsc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(vsc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_NOTIFY),
		    vq->vq_index);
	}
}

void
virtio_push_chain(struct vq_entry *qe, boolean_t sync)
{
	struct virtqueue *vq = qe->qe_queue;
	struct vq_entry *head = qe;
	struct vring_desc *desc;
	int idx;

	ASSERT(qe);

	/*
	 * Bind the descs together, paddr and len should be already
	 * set with virtio_ve_set
	 */
	do {
		/* Bind the indirect descriptors */
		if (qe->qe_indirect_next > 1) {
			uint16_t i = 0;

			/*
			 * Set the pointer/flags to the
			 * first indirect descriptor
			 */
			virtio_ve_set_desc(qe->qe_desc,
			    qe->qe_indirect_dma_cookie.dmac_laddress,
			    sizeof (struct vring_desc) * qe->qe_indirect_next,
			    B_FALSE);
			qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;

			/* For all but the last one, add the next index/flag */
			do {
				desc = &qe->qe_indirect_descs[i];
				i++;

				desc->flags |= VRING_DESC_F_NEXT;
				desc->next = i;
			} while (i < qe->qe_indirect_next - 1);
		}

		if (qe->qe_next) {
			qe->qe_desc->flags |= VRING_DESC_F_NEXT;
			qe->qe_desc->next = qe->qe_next->qe_index;
		}

		qe = qe->qe_next;
	} while (qe);

	mutex_enter(&vq->vq_avail_lock);
	idx = vq->vq_avail_idx;
	vq->vq_avail_idx++;

	/* Make sure the bits hit the descriptor(s) */
	membar_producer();
	vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;

	/* Notify the device, if needed. */
	if (sync)
		virtio_sync_vq(vq);

	mutex_exit(&vq->vq_avail_lock);
}
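
/*
 * Submission sketch (illustrative, error handling omitted): entries are
 * taken from the free list, filled in, pushed onto the avail ring and,
 * once the device has consumed them, pulled back from the used ring and
 * returned.  buf_paddr and buf_len stand in for the caller's buffer:
 *
 *	qe = vq_alloc_entry(vq);
 *	virtio_ve_set(qe, buf_paddr, buf_len, B_TRUE);
 *	virtio_push_chain(qe, B_TRUE);
 *	...
 *	qe = virtio_pull_chain(vq, &len);
 *	virtio_free_chain(qe);
 */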

/*
 * Get a chain of descriptors from the used ring, if one is available.
 */
struct vq_entry *
virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
{
	struct vq_entry *head;
	int slot;
	int usedidx;

	mutex_enter(&vq->vq_used_lock);

	/* No used entries? Bye. */
	if (vq->vq_used_idx == vq->vq_used->idx) {
		mutex_exit(&vq->vq_used_lock);
		return (NULL);
	}

	usedidx = vq->vq_used_idx;
	vq->vq_used_idx++;
	mutex_exit(&vq->vq_used_lock);

	usedidx %= vq->vq_num;

	/* Make sure we do the next step _after_ checking the idx. */
	membar_consumer();

	slot = vq->vq_used->ring[usedidx].id;
	*len = vq->vq_used->ring[usedidx].len;

	head = &vq->vq_entries[slot];

	return (head);
}

void
virtio_free_chain(struct vq_entry *qe)
{
	struct vq_entry *tmp;
	struct virtqueue *vq = qe->qe_queue;

	ASSERT(qe);

	do {
		ASSERT(qe->qe_queue == vq);
		tmp = qe->qe_next;
		vq_free_entry(vq, qe);
		qe = tmp;
	} while (tmp != NULL);
}

void
virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
{
	first->qe_next = second;
}

static int
virtio_register_msi(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[], int intr_types)
{
	int count, actual;
	int int_type;
	int i;
	int handler_count;
	int ret;

	/* If both MSI and MSI-x are reported, prefer MSI-x. */
	int_type = DDI_INTR_TYPE_MSI;
	if (intr_types & DDI_INTR_TYPE_MSIX)
		int_type = DDI_INTR_TYPE_MSIX;

	/* Walk the handler table to get the number of handlers. */
	for (handler_count = 0;
	    vq_handlers && vq_handlers[handler_count].vh_func;
	    handler_count++)
		;

	/* +1 if there is a config change handler. */
	if (config_handler != NULL)
		handler_count++;

	/* Number of MSIs supported by the device. */
	ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
		return (ret);
	}

	/*
	 * Those who try to register more handlers than the device
	 * supports shall suffer.
	 */
	ASSERT(handler_count <= count);

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
	    handler_count, KM_SLEEP);

	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
	    handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
		goto out_msi_alloc;
	}

	if (actual != handler_count) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Not enough MSI available: need %d, available %d",
		    handler_count, actual);
		goto out_msi_available;
	}

	sc->sc_intr_num = handler_count;
	sc->sc_intr_config = B_FALSE;
	if (config_handler != NULL) {
		sc->sc_intr_config = B_TRUE;
	}

	/* Assume they are all same priority */
	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
		goto out_msi_prio;
	}

	/* Add the vq handlers */
	for (i = 0; vq_handlers[i].vh_func; i++) {
		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
		    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "ddi_intr_add_handler failed");
			/* Remove the handlers that succeeded. */
			while (--i >= 0) {
				(void) ddi_intr_remove_handler(
				    sc->sc_intr_htable[i]);
			}
			goto out_add_handlers;
		}
	}

	/* Don't forget the config handler */
	if (config_handler != NULL) {
		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
		    config_handler->vh_func, sc, config_handler->vh_priv);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "ddi_intr_add_handler failed");
			/* Remove the handlers that succeeded. */
			while (--i >= 0) {
				(void) ddi_intr_remove_handler(
				    sc->sc_intr_htable[i]);
			}
			goto out_add_handlers;
		}
	}

	ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
	if (ret == DDI_SUCCESS) {
		sc->sc_int_type = int_type;
		return (DDI_SUCCESS);
	}

out_add_handlers:
out_msi_prio:
out_msi_available:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);
out_msi_alloc:
	kmem_free(sc->sc_intr_htable,
	    sizeof (ddi_intr_handle_t) * handler_count);

	return (ret);
}

struct virtio_handler_container {
	int nhandlers;
	struct virtio_int_handler config_handler;
	struct virtio_int_handler vq_handlers[];
};

uint_t
virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
{
	struct virtio_softc *sc = (void *)arg1;
	struct virtio_handler_container *vhc = (void *)arg2;
	uint8_t isr_status;
	int i;

	isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_ISR_STATUS));

	if (!isr_status)
		return (DDI_INTR_UNCLAIMED);

	if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    vhc->config_handler.vh_func) {
		vhc->config_handler.vh_func((void *)sc,
		    vhc->config_handler.vh_priv);
	}

	/* Notify all handlers */
	for (i = 0; i < vhc->nhandlers; i++) {
		vhc->vq_handlers[i].vh_func((void *)sc,
		    vhc->vq_handlers[i].vh_priv);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * config_handler and vq_handlers may be allocated on stack.
 * Take precautions not to lose them.
 */
static int
virtio_register_intx(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
	int vq_handler_count;
	int actual;
	struct virtio_handler_container *vhc;
	size_t vhc_sz;
	int ret = DDI_FAILURE;

	/* Walk the handler table to get the number of handlers. */
	for (vq_handler_count = 0;
	    vq_handlers && vq_handlers[vq_handler_count].vh_func;
	    vq_handler_count++)
		;

	vhc_sz = sizeof (struct virtio_handler_container) +
	    sizeof (struct virtio_int_handler) * vq_handler_count;
	vhc = kmem_zalloc(vhc_sz, KM_SLEEP);

	vhc->nhandlers = vq_handler_count;
	(void) memcpy(vhc->vq_handlers, vq_handlers,
	    sizeof (struct virtio_int_handler) * vq_handler_count);

	if (config_handler != NULL) {
		(void) memcpy(&vhc->config_handler, config_handler,
		    sizeof (struct virtio_int_handler));
	}

	/* Just a single entry for a single interrupt. */
	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
	    DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate a fixed interrupt: %d", ret);
		goto out_int_alloc;
	}

	ASSERT(actual == 1);
	sc->sc_intr_num = 1;

	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
		goto out_prio;
	}

	ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
	    virtio_intx_dispatch, sc, vhc);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
		goto out_add_handlers;
	}

	sc->sc_int_type = DDI_INTR_TYPE_FIXED;

	return (DDI_SUCCESS);

out_add_handlers:
out_prio:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
out_int_alloc:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
	kmem_free(vhc, vhc_sz);
	return (ret);
}

/*
 * We find out if we support MSI during this, and the register layout
 * depends on the MSI (doh). Don't access the device specific bits in
 * BAR 0 before calling it!
 */
int
virtio_register_ints(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
	int ret;
	int intr_types;

	/* Default offset until MSI-X is enabled, if ever. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;

	/* Determine which types of interrupts are supported */
	ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
		goto out_inttype;
	}

	/* If we have msi, let's use them. */
	if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
		ret = virtio_register_msi(sc, config_handler,
		    vq_handlers, intr_types);
		if (!ret)
			return (0);
	}

	/* Fall back to old-fashioned interrupts. */
	if (intr_types & DDI_INTR_TYPE_FIXED) {
		dev_debug(sc->sc_dev, CE_WARN,
		    "Using legacy interrupts");

		return (virtio_register_intx(sc, config_handler, vq_handlers));
	}

	dev_err(sc->sc_dev, CE_WARN,
	    "MSI failed and fixed interrupts not supported. Giving up.");
	ret = DDI_FAILURE;

out_inttype:
	return (ret);
}

static int
virtio_enable_msi(struct virtio_softc *sc)
{
	int ret, i;
	int vq_handler_count = sc->sc_intr_num;

	/* Number of handlers, not counting the config. */
	if (sc->sc_intr_config)
		vq_handler_count--;

	/* Enable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_enable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to enable MSI, falling back to INTx");
			goto out_enable;
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to enable MSI %d, "
				    "falling back to INTx", i);

				while (--i >= 0) {
					(void) ddi_intr_disable(
					    sc->sc_intr_htable[i]);
				}
				goto out_enable;
			}
		}
	}

	/* Bind the allocated MSI to the queues and config */
	for (i = 0; i < vq_handler_count; i++) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for VQ %d, MSI %d. Check = %x", i, i, check);
			ret = ENODEV;
			goto out_bind;
		}
	}

	if (sc->sc_intr_config) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for Config updates, MSI %d", i);
			ret = ENODEV;
			goto out_bind;
		}
	}

	/* Configuration offset depends on whether MSI-X is used. */
	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX)
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSIX;
	else
		ASSERT(sc->sc_int_type == DDI_INTR_TYPE_MSI);

	return (DDI_SUCCESS);

out_bind:
	/* Unbind the vqs */
	for (i = 0; i < vq_handler_count - 1; i++) {
		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);
	}
	/* And the config */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);

	/* Disable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to disable MSIs, won't be able to "
			    "reuse next time");
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to disable interrupt %d, "
				    "won't be able to reuse", i);
			}
		}
	}

	ret = DDI_FAILURE;

out_enable:
	return (ret);
}

static int
virtio_enable_intx(struct virtio_softc *sc)
{
	int ret;

	ret = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to enable interrupt: %d", ret);
	}

	return (ret);
}

/*
 * We can't enable/disable individual handlers in the INTx case so do
 * the whole bunch even in the msi case.
 */
int
virtio_enable_ints(struct virtio_softc *sc)
{
	ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX);

	/* See if we are using MSI. */
	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
	    sc->sc_int_type == DDI_INTR_TYPE_MSI)
		return (virtio_enable_msi(sc));

	ASSERT(sc->sc_int_type == DDI_INTR_TYPE_FIXED);
	return (virtio_enable_intx(sc));
}
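
/*
 * Attach-time ordering sketch (illustrative): handlers are registered
 * before any queue is allocated, since registration establishes
 * sc_intr_prio, and interrupts are enabled once the queues exist:
 *
 *	ret = virtio_register_ints(sc, &config_handler, vq_handlers);
 *	... allocate the virtqueues with virtio_alloc_vq() ...
 *	ret = virtio_enable_ints(sc);
 *	...
 *	virtio_release_ints(sc);
 */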

void
virtio_release_ints(struct virtio_softc *sc)
{
	int i;
	int ret;

	/* We were running with MSI, unbind them. */
	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
	    sc->sc_int_type == DDI_INTR_TYPE_MSI) {
		/* Unbind all vqs */
		for (i = 0; i < sc->sc_nvqs; i++) {
			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_SELECT), i);

			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_VECTOR),
			    VIRTIO_MSI_NO_VECTOR);
		}
		/* And the config */
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);
	}

	/* Disable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to disable MSIs, won't be able to "
			    "reuse next time");
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to disable interrupt %d, "
				    "won't be able to reuse", i);
			}
		}
	}

	for (i = 0; i < sc->sc_intr_num; i++) {
		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
	}

	for (i = 0; i < sc->sc_intr_num; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);

	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
	    sc->sc_intr_num);

	/* After disabling interrupts, the config offset is non-MSI-X. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
}

/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"VirtIO common library module",
};

static struct modlinkage modlinkage = {
	MODREV_1,
	{
		(void *)&modlmisc,
		NULL
	}
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}