/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/linker_set.h>
36 #include <sys/types.h>
39 #include <sys/queue.h>
51 #include <pthread_np.h>
53 #include <cam/scsi/scsi_all.h>
54 #include <cam/scsi/scsi_message.h>
55 #include <cam/ctl/ctl.h>
56 #include <cam/ctl/ctl_io.h>
57 #include <cam/ctl/ctl_backend.h>
58 #include <cam/ctl/ctl_ioctl.h>
59 #include <cam/ctl/ctl_util.h>
60 #include <cam/ctl/ctl_scsi_all.h>
/* Virtqueue ring size and queue layout. */
#define VTSCSI_RINGSZ		64
#define	VTSCSI_REQUESTQ		1	/* number of request virtqueues */
#define	VTSCSI_THR_PER_Q	16	/* worker threads per request queue */
/* Total queues: controlq (0), eventq (1), then the request queues. */
#define	VTSCSI_MAXQ		(VTSCSI_REQUESTQ + 2)
#define	VTSCSI_MAXSEG		64	/* max scatter/gather entries per chain */

/* Readable header: fixed request header plus the negotiated CDB size. */
#define	VTSCSI_IN_HEADER_LEN(_sc) \
	(sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)

/* Writable header: fixed response header plus the negotiated sense size. */
#define	VTSCSI_OUT_HEADER_LEN(_sc) \
	(sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)

/* Reported topology limits (single channel/target, flat LUN space). */
#define	VIRTIO_SCSI_MAX_CHANNEL	0
#define	VIRTIO_SCSI_MAX_TARGET	0
#define	VIRTIO_SCSI_MAX_LUN	16383

/* virtio-scsi feature bits. */
#define	VIRTIO_SCSI_F_INOUT	(1 << 0)
#define	VIRTIO_SCSI_F_HOTPLUG	(1 << 1)
#define	VIRTIO_SCSI_F_CHANGE	(1 << 2)
/* Set non-zero to enable verbose per-request tracing via DPRINTF. */
static int pci_vtscsi_debug = 0;
#define	WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
/*
 * Wrapped in do/while(0) so DPRINTF is a single statement and cannot
 * capture a following `else` (dangling-else hazard of a bare `if`).
 */
#define	DPRINTF(msg, params...) do {					\
	if (pci_vtscsi_debug)						\
		WPRINTF(msg, ##params);					\
} while (0)
/*
 * Device configuration space, exposed to the guest via cfgread/cfgwrite.
 * Layout follows the virtio-scsi config structure; must stay packed.
 * NOTE(review): only event_info_size/max_channel were legible in the
 * damaged source; the remaining fields are restored from their uses
 * elsewhere in this file (reset/cfg handling) — confirm against upstream.
 */
struct pci_vtscsi_config {
	uint32_t num_queues;		/* number of request queues */
	uint32_t seg_max;		/* max data segments per request */
	uint32_t max_sectors;
	uint32_t cmd_per_lun;
	uint32_t event_info_size;	/* size of an event queue entry */
	uint32_t sense_size;		/* sense buffer bytes per response */
	uint32_t cdb_size;		/* CDB bytes per request */
	uint16_t max_channel;
	uint16_t max_target;
	uint32_t max_lun;
} __attribute__((packed));
/*
 * Per-request-queue state: the backing virtqueue, a request list fed by
 * the notify handler, and a pool of worker threads draining it.
 */
struct pci_vtscsi_queue {
	struct pci_vtscsi_softc *vsq_sc;	/* owning softc */
	struct vqueue_info *vsq_vq;		/* backing virtqueue */
	pthread_mutex_t vsq_mtx;		/* protects vsq_requests */
	pthread_mutex_t vsq_qmtx;		/* serializes virtqueue completion */
	pthread_cond_t vsq_cv;			/* signals workers of new work */
	STAILQ_HEAD(, pci_vtscsi_request) vsq_requests;
	LIST_HEAD(, pci_vtscsi_worker) vsq_workers;
};
/* One worker thread servicing a request queue. */
struct pci_vtscsi_worker {
	struct pci_vtscsi_queue *vsw_queue;	/* queue this worker drains */
	pthread_t vsw_thread;
	/*
	 * NOTE(review): restored field — pci_vtscsi_proc() tests
	 * worker->vsw_exiting; the declaration was lost in the damaged
	 * source. Confirm the exact type against upstream.
	 */
	bool vsw_exiting;			/* set to ask the worker to exit */
	LIST_ENTRY(pci_vtscsi_worker) vsw_link;
};
124 struct pci_vtscsi_request
{
125 struct pci_vtscsi_queue
* vsr_queue
;
126 struct iovec vsr_iov_in
[VTSCSI_MAXSEG
];
128 struct iovec vsr_iov_out
[VTSCSI_MAXSEG
];
131 STAILQ_ENTRY(pci_vtscsi_request
) vsr_link
;
137 struct pci_vtscsi_softc
{
138 struct virtio_softc vss_vs
;
139 struct vqueue_info vss_vq
[VTSCSI_MAXQ
];
140 struct pci_vtscsi_queue vss_queues
[VTSCSI_REQUESTQ
];
141 pthread_mutex_t vss_mtx
;
144 uint32_t vss_features
;
145 struct pci_vtscsi_config vss_config
;
/* Control queue request type: task management function. */
#define	VIRTIO_SCSI_T_TMF			0
/* TMF subtypes. */
#define	VIRTIO_SCSI_T_TMF_ABORT_TASK		0
#define	VIRTIO_SCSI_T_TMF_ABORT_TASK_SET	1
#define	VIRTIO_SCSI_T_TMF_CLEAR_ACA		2
#define	VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET	3
#define	VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET	4
#define	VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET	5
#define	VIRTIO_SCSI_T_TMF_QUERY_TASK		6
#define	VIRTIO_SCSI_T_TMF_QUERY_TASK_SET	7

/* command-specific response values */
#define	VIRTIO_SCSI_S_FUNCTION_COMPLETE		0
#define	VIRTIO_SCSI_S_FUNCTION_SUCCEEDED	10
#define	VIRTIO_SCSI_S_FUNCTION_REJECTED		11
/*
 * Control queue TMF request/response.
 * NOTE(review): interior fields restored from their uses in
 * pci_vtscsi_tmf_handle() (tmf->lun, tmf->id, tmf->subtype,
 * tmf->response) — confirm layout against the virtio-scsi spec.
 */
struct pci_vtscsi_ctrl_tmf {
	uint32_t type;		/* VIRTIO_SCSI_T_TMF */
	uint32_t subtype;	/* VIRTIO_SCSI_T_TMF_* */
	uint8_t lun[8];
	uint64_t id;		/* tag of the task to act on */
	uint8_t response;	/* VIRTIO_SCSI_S_FUNCTION_* (written back) */
} __attribute__((packed));
/* Control queue request type: asynchronous notification query. */
#define	VIRTIO_SCSI_T_AN_QUERY			1
/* Async event classes a driver may subscribe to. */
#define	VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
#define	VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT	4
#define	VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST	8
#define	VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE	16
#define	VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST	32
#define	VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY	64
/*
 * Control queue asynchronous-notification request/response.
 * NOTE(review): type/lun/response fields restored from the virtio-scsi
 * request layout; only event_requested/event_actual were legible in the
 * damaged source.
 */
struct pci_vtscsi_ctrl_an {
	uint32_t type;		/* VIRTIO_SCSI_T_AN_QUERY */
	uint8_t lun[8];
	uint32_t event_requested;
	uint32_t event_actual;	/* written back by the device */
	uint8_t response;
} __attribute__((packed));
/* command-specific response values */
#define	VIRTIO_SCSI_S_OK		0
#define	VIRTIO_SCSI_S_OVERRUN		1
#define	VIRTIO_SCSI_S_ABORTED		2
#define	VIRTIO_SCSI_S_BAD_TARGET	3
#define	VIRTIO_SCSI_S_RESET		4
#define	VIRTIO_SCSI_S_BUSY		5
#define	VIRTIO_SCSI_S_TRANSPORT_FAILURE	6
#define	VIRTIO_SCSI_S_TARGET_FAILURE	7
#define	VIRTIO_SCSI_S_NEXUS_FAILURE	8
#define	VIRTIO_SCSI_S_FAILURE		9
#define	VIRTIO_SCSI_S_INCORRECT_LUN	12

/* task attributes (queueing discipline requested by the guest) */
#define	VIRTIO_SCSI_S_SIMPLE		0
#define	VIRTIO_SCSI_S_ORDERED		1
#define	VIRTIO_SCSI_S_HEAD		2
#define	VIRTIO_SCSI_S_ACA		3
/*
 * Event queue entry; its size is advertised via config.event_info_size.
 * NOTE(review): no field survived in the damaged source — the fields
 * below follow the virtio-scsi event layout; confirm against upstream.
 */
struct pci_vtscsi_event {
	uint32_t event;
	uint8_t lun[8];
	uint32_t reason;
} __attribute__((packed));
/*
 * Device-readable request header; a cdb_size-byte CDB follows as a
 * flexible array member (VTSCSI_IN_HEADER_LEN accounts for it).
 * NOTE(review): fields restored from their uses in
 * pci_vtscsi_request_handle() (cmd_rd->lun, ->id, ->task_attr, ->cdb).
 */
struct pci_vtscsi_req_cmd_rd {
	uint8_t lun[8];
	uint64_t id;		/* request tag */
	uint8_t task_attr;	/* VIRTIO_SCSI_S_{SIMPLE,ORDERED,HEAD,ACA} */
	uint8_t prio;
	uint8_t crn;
	uint8_t cdb[];		/* cdb_size bytes */
} __attribute__((packed));
/*
 * Device-writable response header; sense_size bytes of sense data
 * follow as a flexible array member (VTSCSI_OUT_HEADER_LEN accounts
 * for it).
 * NOTE(review): fields other than status_qualifier restored from their
 * uses in pci_vtscsi_request_handle() (cmd_wr->sense_len, ->residual,
 * ->status, ->response, ->sense).
 */
struct pci_vtscsi_req_cmd_wr {
	uint32_t sense_len;
	uint32_t residual;
	uint16_t status_qualifier;
	uint8_t status;		/* SCSI status byte */
	uint8_t response;	/* VIRTIO_SCSI_S_* */
	uint8_t sense[];	/* sense_size bytes */
} __attribute__((packed));
230 static void *pci_vtscsi_proc(void *);
231 static void pci_vtscsi_reset(void *);
232 static void pci_vtscsi_neg_features(void *, uint64_t);
233 static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
234 static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
235 static inline int pci_vtscsi_get_lun(uint8_t *);
236 static int pci_vtscsi_control_handle(struct pci_vtscsi_softc
*, void *, size_t);
237 static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc
*,
238 struct pci_vtscsi_ctrl_tmf
*);
239 static int pci_vtscsi_an_handle(struct pci_vtscsi_softc
*,
240 struct pci_vtscsi_ctrl_an
*);
241 static int pci_vtscsi_request_handle(struct pci_vtscsi_queue
*, struct iovec
*,
242 int, struct iovec
*, int);
243 static void pci_vtscsi_controlq_notify(void *, struct vqueue_info
*);
244 static void pci_vtscsi_eventq_notify(void *, struct vqueue_info
*);
245 static void pci_vtscsi_requestq_notify(void *, struct vqueue_info
*);
246 static int pci_vtscsi_init_queue(struct pci_vtscsi_softc
*,
247 struct pci_vtscsi_queue
*, int);
248 static int pci_vtscsi_init(struct vmctx
*, struct pci_devinst
*, nvlist_t
*);
250 static struct virtio_consts vtscsi_vi_consts
= {
252 .vc_nvq
= VTSCSI_MAXQ
,
253 .vc_cfgsize
= sizeof(struct pci_vtscsi_config
),
254 .vc_reset
= pci_vtscsi_reset
,
255 .vc_cfgread
= pci_vtscsi_cfgread
,
256 .vc_cfgwrite
= pci_vtscsi_cfgwrite
,
257 .vc_apply_features
= pci_vtscsi_neg_features
,
262 pci_vtscsi_proc(void *arg
)
264 struct pci_vtscsi_worker
*worker
= (struct pci_vtscsi_worker
*)arg
;
265 struct pci_vtscsi_queue
*q
= worker
->vsw_queue
;
266 struct pci_vtscsi_request
*req
;
270 pthread_mutex_lock(&q
->vsq_mtx
);
272 while (STAILQ_EMPTY(&q
->vsq_requests
)
273 && !worker
->vsw_exiting
)
274 pthread_cond_wait(&q
->vsq_cv
, &q
->vsq_mtx
);
276 if (worker
->vsw_exiting
)
279 req
= STAILQ_FIRST(&q
->vsq_requests
);
280 STAILQ_REMOVE_HEAD(&q
->vsq_requests
, vsr_link
);
282 pthread_mutex_unlock(&q
->vsq_mtx
);
283 iolen
= pci_vtscsi_request_handle(q
, req
->vsr_iov_in
,
284 req
->vsr_niov_in
, req
->vsr_iov_out
, req
->vsr_niov_out
);
286 pthread_mutex_lock(&q
->vsq_qmtx
);
287 vq_relchain(q
->vsq_vq
, req
->vsr_idx
, iolen
);
288 vq_endchains(q
->vsq_vq
, 0);
289 pthread_mutex_unlock(&q
->vsq_qmtx
);
291 DPRINTF("request <idx=%d> completed", req
->vsr_idx
);
295 pthread_mutex_unlock(&q
->vsq_mtx
);
300 pci_vtscsi_reset(void *vsc
)
302 struct pci_vtscsi_softc
*sc
;
306 DPRINTF("device reset requested");
307 vi_reset_dev(&sc
->vss_vs
);
309 /* initialize config structure */
310 sc
->vss_config
= (struct pci_vtscsi_config
){
311 .num_queues
= VTSCSI_REQUESTQ
,
312 /* Leave room for the request and the response. */
313 .seg_max
= VTSCSI_MAXSEG
- 2,
316 .event_info_size
= sizeof(struct pci_vtscsi_event
),
319 .max_channel
= VIRTIO_SCSI_MAX_CHANNEL
,
320 .max_target
= VIRTIO_SCSI_MAX_TARGET
,
321 .max_lun
= VIRTIO_SCSI_MAX_LUN
326 pci_vtscsi_neg_features(void *vsc
, uint64_t negotiated_features
)
328 struct pci_vtscsi_softc
*sc
= vsc
;
330 sc
->vss_features
= negotiated_features
;
334 pci_vtscsi_cfgread(void *vsc
, int offset
, int size
, uint32_t *retval
)
336 struct pci_vtscsi_softc
*sc
= vsc
;
339 ptr
= (uint8_t *)&sc
->vss_config
+ offset
;
340 memcpy(retval
, ptr
, size
);
345 pci_vtscsi_cfgwrite(void *vsc __unused
, int offset __unused
, int size __unused
,
346 uint32_t val __unused
)
/*
 * Decode an 8-byte virtio-scsi LUN field into a flat CTL LUN id:
 * bytes 2-3 carry the LUN in (big-endian) SAM format; the top two
 * bits are the address-method bits and are masked off.
 */
static inline int
pci_vtscsi_get_lun(uint8_t *lun)
{

	return (((lun[2] << 8) | lun[3]) & 0x3fff);
}
359 pci_vtscsi_control_handle(struct pci_vtscsi_softc
*sc
, void *buf
,
362 struct pci_vtscsi_ctrl_tmf
*tmf
;
363 struct pci_vtscsi_ctrl_an
*an
;
366 if (bufsize
< sizeof(uint32_t)) {
367 WPRINTF("ignoring truncated control request");
371 type
= *(uint32_t *)buf
;
373 if (type
== VIRTIO_SCSI_T_TMF
) {
374 if (bufsize
!= sizeof(*tmf
)) {
375 WPRINTF("ignoring tmf request with size %zu", bufsize
);
378 tmf
= (struct pci_vtscsi_ctrl_tmf
*)buf
;
379 return (pci_vtscsi_tmf_handle(sc
, tmf
));
382 if (type
== VIRTIO_SCSI_T_AN_QUERY
) {
383 if (bufsize
!= sizeof(*an
)) {
384 WPRINTF("ignoring AN request with size %zu", bufsize
);
387 an
= (struct pci_vtscsi_ctrl_an
*)buf
;
388 return (pci_vtscsi_an_handle(sc
, an
));
395 pci_vtscsi_tmf_handle(struct pci_vtscsi_softc
*sc
,
396 struct pci_vtscsi_ctrl_tmf
*tmf
)
401 io
= ctl_scsi_alloc_io(sc
->vss_iid
);
402 ctl_scsi_zero_io(io
);
404 io
->io_hdr
.io_type
= CTL_IO_TASK
;
405 io
->io_hdr
.nexus
.initid
= sc
->vss_iid
;
406 io
->io_hdr
.nexus
.targ_lun
= pci_vtscsi_get_lun(tmf
->lun
);
407 io
->taskio
.tag_type
= CTL_TAG_SIMPLE
;
408 io
->taskio
.tag_num
= tmf
->id
;
409 io
->io_hdr
.flags
|= CTL_FLAG_USER_TAG
;
411 switch (tmf
->subtype
) {
412 case VIRTIO_SCSI_T_TMF_ABORT_TASK
:
413 io
->taskio
.task_action
= CTL_TASK_ABORT_TASK
;
416 case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET
:
417 io
->taskio
.task_action
= CTL_TASK_ABORT_TASK_SET
;
420 case VIRTIO_SCSI_T_TMF_CLEAR_ACA
:
421 io
->taskio
.task_action
= CTL_TASK_CLEAR_ACA
;
424 case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET
:
425 io
->taskio
.task_action
= CTL_TASK_CLEAR_TASK_SET
;
428 case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET
:
429 io
->taskio
.task_action
= CTL_TASK_I_T_NEXUS_RESET
;
432 case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET
:
433 io
->taskio
.task_action
= CTL_TASK_LUN_RESET
;
436 case VIRTIO_SCSI_T_TMF_QUERY_TASK
:
437 io
->taskio
.task_action
= CTL_TASK_QUERY_TASK
;
440 case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET
:
441 io
->taskio
.task_action
= CTL_TASK_QUERY_TASK_SET
;
445 if (pci_vtscsi_debug
) {
446 struct sbuf
*sb
= sbuf_new_auto();
449 DPRINTF("%s", sbuf_data(sb
));
453 err
= ioctl(sc
->vss_ctl_fd
, CTL_IO
, io
);
455 WPRINTF("CTL_IO: err=%d (%s)", errno
, strerror(errno
));
457 tmf
->response
= io
->taskio
.task_status
;
458 ctl_scsi_free_io(io
);
463 pci_vtscsi_an_handle(struct pci_vtscsi_softc
*sc __unused
,
464 struct pci_vtscsi_ctrl_an
*an __unused
)
470 pci_vtscsi_request_handle(struct pci_vtscsi_queue
*q
, struct iovec
*iov_in
,
471 int niov_in
, struct iovec
*iov_out
, int niov_out
)
473 struct pci_vtscsi_softc
*sc
= q
->vsq_sc
;
474 struct pci_vtscsi_req_cmd_rd
*cmd_rd
= NULL
;
475 struct pci_vtscsi_req_cmd_wr
*cmd_wr
;
476 struct iovec data_iov_in
[VTSCSI_MAXSEG
], data_iov_out
[VTSCSI_MAXSEG
];
478 int data_niov_in
, data_niov_out
;
479 void *ext_data_ptr
= NULL
;
480 uint32_t ext_data_len
= 0, ext_sg_entries
= 0;
483 if (count_iov(iov_out
, niov_out
) < VTSCSI_OUT_HEADER_LEN(sc
)) {
484 WPRINTF("ignoring request with insufficient output");
487 if (count_iov(iov_in
, niov_in
) < VTSCSI_IN_HEADER_LEN(sc
)) {
488 WPRINTF("ignoring request with incomplete header");
492 seek_iov(iov_in
, niov_in
, data_iov_in
, &data_niov_in
,
493 VTSCSI_IN_HEADER_LEN(sc
));
494 seek_iov(iov_out
, niov_out
, data_iov_out
, &data_niov_out
,
495 VTSCSI_OUT_HEADER_LEN(sc
));
497 truncate_iov(iov_in
, &niov_in
, VTSCSI_IN_HEADER_LEN(sc
));
498 truncate_iov(iov_out
, &niov_out
, VTSCSI_OUT_HEADER_LEN(sc
));
499 iov_to_buf(iov_in
, niov_in
, (void **)&cmd_rd
);
501 cmd_wr
= calloc(1, VTSCSI_OUT_HEADER_LEN(sc
));
502 io
= ctl_scsi_alloc_io(sc
->vss_iid
);
503 ctl_scsi_zero_io(io
);
505 io
->io_hdr
.nexus
.initid
= sc
->vss_iid
;
506 io
->io_hdr
.nexus
.targ_lun
= pci_vtscsi_get_lun(cmd_rd
->lun
);
508 io
->io_hdr
.io_type
= CTL_IO_SCSI
;
510 if (data_niov_in
> 0) {
511 ext_data_ptr
= (void *)data_iov_in
;
512 ext_sg_entries
= data_niov_in
;
513 ext_data_len
= count_iov(data_iov_in
, data_niov_in
);
514 io
->io_hdr
.flags
|= CTL_FLAG_DATA_OUT
;
515 } else if (data_niov_out
> 0) {
516 ext_data_ptr
= (void *)data_iov_out
;
517 ext_sg_entries
= data_niov_out
;
518 ext_data_len
= count_iov(data_iov_out
, data_niov_out
);
519 io
->io_hdr
.flags
|= CTL_FLAG_DATA_IN
;
522 io
->scsiio
.sense_len
= sc
->vss_config
.sense_size
;
523 io
->scsiio
.tag_num
= cmd_rd
->id
;
524 io
->io_hdr
.flags
|= CTL_FLAG_USER_TAG
;
525 switch (cmd_rd
->task_attr
) {
526 case VIRTIO_SCSI_S_ORDERED
:
527 io
->scsiio
.tag_type
= CTL_TAG_ORDERED
;
529 case VIRTIO_SCSI_S_HEAD
:
530 io
->scsiio
.tag_type
= CTL_TAG_HEAD_OF_QUEUE
;
532 case VIRTIO_SCSI_S_ACA
:
533 io
->scsiio
.tag_type
= CTL_TAG_ACA
;
535 case VIRTIO_SCSI_S_SIMPLE
:
537 io
->scsiio
.tag_type
= CTL_TAG_SIMPLE
;
540 io
->scsiio
.ext_sg_entries
= ext_sg_entries
;
541 io
->scsiio
.ext_data_ptr
= ext_data_ptr
;
542 io
->scsiio
.ext_data_len
= ext_data_len
;
543 io
->scsiio
.ext_data_filled
= 0;
544 io
->scsiio
.cdb_len
= sc
->vss_config
.cdb_size
;
545 memcpy(io
->scsiio
.cdb
, cmd_rd
->cdb
, sc
->vss_config
.cdb_size
);
547 if (pci_vtscsi_debug
) {
548 struct sbuf
*sb
= sbuf_new_auto();
551 DPRINTF("%s", sbuf_data(sb
));
555 err
= ioctl(sc
->vss_ctl_fd
, CTL_IO
, io
);
557 WPRINTF("CTL_IO: err=%d (%s)", errno
, strerror(errno
));
558 cmd_wr
->response
= VIRTIO_SCSI_S_FAILURE
;
560 cmd_wr
->sense_len
= MIN(io
->scsiio
.sense_len
,
561 sc
->vss_config
.sense_size
);
562 cmd_wr
->residual
= ext_data_len
- io
->scsiio
.ext_data_filled
;
563 cmd_wr
->status
= io
->scsiio
.scsi_status
;
564 cmd_wr
->response
= VIRTIO_SCSI_S_OK
;
565 memcpy(&cmd_wr
->sense
, &io
->scsiio
.sense_data
,
569 buf_to_iov(cmd_wr
, VTSCSI_OUT_HEADER_LEN(sc
), iov_out
, niov_out
, 0);
570 nxferred
= VTSCSI_OUT_HEADER_LEN(sc
) + io
->scsiio
.ext_data_filled
;
573 ctl_scsi_free_io(io
);
578 pci_vtscsi_controlq_notify(void *vsc
, struct vqueue_info
*vq
)
580 struct pci_vtscsi_softc
*sc
;
581 struct iovec iov
[VTSCSI_MAXSEG
];
589 while (vq_has_descs(vq
)) {
590 n
= vq_getchain(vq
, iov
, VTSCSI_MAXSEG
, &req
);
591 assert(n
>= 1 && n
<= VTSCSI_MAXSEG
);
593 bufsize
= iov_to_buf(iov
, n
, &buf
);
594 iolen
= pci_vtscsi_control_handle(sc
, buf
, bufsize
);
595 buf_to_iov((uint8_t *)buf
+ bufsize
- iolen
, iolen
, iov
, n
,
599 * Release this chain and handle more
601 vq_relchain(vq
, req
.idx
, iolen
);
603 vq_endchains(vq
, 1); /* Generate interrupt if appropriate. */
608 pci_vtscsi_eventq_notify(void *vsc __unused
, struct vqueue_info
*vq
)
614 pci_vtscsi_requestq_notify(void *vsc
, struct vqueue_info
*vq
)
616 struct pci_vtscsi_softc
*sc
;
617 struct pci_vtscsi_queue
*q
;
618 struct pci_vtscsi_request
*req
;
619 struct iovec iov
[VTSCSI_MAXSEG
];
624 q
= &sc
->vss_queues
[vq
->vq_num
- 2];
626 while (vq_has_descs(vq
)) {
627 n
= vq_getchain(vq
, iov
, VTSCSI_MAXSEG
, &vireq
);
628 assert(n
>= 1 && n
<= VTSCSI_MAXSEG
);
630 req
= calloc(1, sizeof(struct pci_vtscsi_request
));
631 req
->vsr_idx
= vireq
.idx
;
633 req
->vsr_niov_in
= vireq
.readable
;
634 req
->vsr_niov_out
= vireq
.writable
;
635 memcpy(req
->vsr_iov_in
, iov
,
636 req
->vsr_niov_in
* sizeof(struct iovec
));
637 memcpy(req
->vsr_iov_out
, iov
+ vireq
.readable
,
638 req
->vsr_niov_out
* sizeof(struct iovec
));
640 pthread_mutex_lock(&q
->vsq_mtx
);
641 STAILQ_INSERT_TAIL(&q
->vsq_requests
, req
, vsr_link
);
642 pthread_cond_signal(&q
->vsq_cv
);
643 pthread_mutex_unlock(&q
->vsq_mtx
);
645 DPRINTF("request <idx=%d> enqueued", vireq
.idx
);
650 pci_vtscsi_init_queue(struct pci_vtscsi_softc
*sc
,
651 struct pci_vtscsi_queue
*queue
, int num
)
653 struct pci_vtscsi_worker
*worker
;
654 char tname
[MAXCOMLEN
+ 1];
658 queue
->vsq_vq
= &sc
->vss_vq
[num
+ 2];
660 pthread_mutex_init(&queue
->vsq_mtx
, NULL
);
661 pthread_mutex_init(&queue
->vsq_qmtx
, NULL
);
662 pthread_cond_init(&queue
->vsq_cv
, NULL
);
663 STAILQ_INIT(&queue
->vsq_requests
);
664 LIST_INIT(&queue
->vsq_workers
);
666 for (i
= 0; i
< VTSCSI_THR_PER_Q
; i
++) {
667 worker
= calloc(1, sizeof(struct pci_vtscsi_worker
));
668 worker
->vsw_queue
= queue
;
670 pthread_create(&worker
->vsw_thread
, NULL
, &pci_vtscsi_proc
,
673 snprintf(tname
, sizeof(tname
), "vtscsi:%d-%d", num
, i
);
674 pthread_set_name_np(worker
->vsw_thread
, tname
);
675 LIST_INSERT_HEAD(&queue
->vsq_workers
, worker
, vsw_link
);
682 pci_vtscsi_legacy_config(nvlist_t
*nvl
, const char *opts
)
689 cp
= strchr(opts
, ',');
691 set_config_value_node(nvl
, "dev", opts
);
694 devname
= strndup(opts
, cp
- opts
);
695 set_config_value_node(nvl
, "dev", devname
);
697 return (pci_parse_legacy_config(nvl
, cp
+ 1));
701 pci_vtscsi_init(struct vmctx
*ctx __unused
, struct pci_devinst
*pi
,
704 struct pci_vtscsi_softc
*sc
;
705 const char *devname
, *value
;
708 sc
= calloc(1, sizeof(struct pci_vtscsi_softc
));
709 value
= get_config_value_node(nvl
, "iid");
711 sc
->vss_iid
= strtoul(value
, NULL
, 10);
713 devname
= get_config_value_node(nvl
, "dev");
715 devname
= "/dev/cam/ctl";
716 sc
->vss_ctl_fd
= open(devname
, O_RDWR
);
717 if (sc
->vss_ctl_fd
< 0) {
718 WPRINTF("cannot open %s: %s", devname
, strerror(errno
));
723 pthread_mutex_init(&sc
->vss_mtx
, NULL
);
725 vi_softc_linkup(&sc
->vss_vs
, &vtscsi_vi_consts
, sc
, pi
, sc
->vss_vq
);
726 sc
->vss_vs
.vs_mtx
= &sc
->vss_mtx
;
729 sc
->vss_vq
[0].vq_qsize
= VTSCSI_RINGSZ
;
730 sc
->vss_vq
[0].vq_notify
= pci_vtscsi_controlq_notify
;
733 sc
->vss_vq
[1].vq_qsize
= VTSCSI_RINGSZ
;
734 sc
->vss_vq
[1].vq_notify
= pci_vtscsi_eventq_notify
;
737 for (i
= 2; i
< VTSCSI_MAXQ
; i
++) {
738 sc
->vss_vq
[i
].vq_qsize
= VTSCSI_RINGSZ
;
739 sc
->vss_vq
[i
].vq_notify
= pci_vtscsi_requestq_notify
;
740 pci_vtscsi_init_queue(sc
, &sc
->vss_queues
[i
- 2], i
- 2);
743 /* initialize config space */
744 pci_set_cfgdata16(pi
, PCIR_DEVICE
, VIRTIO_DEV_SCSI
);
745 pci_set_cfgdata16(pi
, PCIR_VENDOR
, VIRTIO_VENDOR
);
746 pci_set_cfgdata8(pi
, PCIR_CLASS
, PCIC_STORAGE
);
747 pci_set_cfgdata16(pi
, PCIR_SUBDEV_0
, VIRTIO_ID_SCSI
);
748 pci_set_cfgdata16(pi
, PCIR_SUBVEND_0
, VIRTIO_VENDOR
);
750 if (vi_intr_init(&sc
->vss_vs
, 1, fbsdrun_virtio_msix()))
752 vi_set_io_bar(&sc
->vss_vs
, 0);
758 static const struct pci_devemu pci_de_vscsi
= {
759 .pe_emu
= "virtio-scsi",
760 .pe_init
= pci_vtscsi_init
,
761 .pe_legacy_config
= pci_vtscsi_legacy_config
,
762 .pe_barwrite
= vi_pci_write
,
763 .pe_barread
= vi_pci_read
765 PCI_EMUL_SET(pci_de_vscsi
);