hw/scsi/virtio-scsi.c
/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini      <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"
typedef struct VirtIOSCSIReq {
    /*
     * Note:
     * - fields up to resp_iov are initialized by virtio_scsi_init_req;
     * - fields after resp_iov are zeroed by virtio_scsi_init_req.
     */
    VirtQueueElement elem;

    VirtIOSCSI *dev;
    VirtQueue *vq;
    QEMUSGList qsgl;
    QEMUIOVector resp_iov;

    union {
        /* Used for two-stage request submission */
        QTAILQ_ENTRY(VirtIOSCSIReq) next;

        /* Used for cancellation of request during TMFs */
        int remaining;
    };

    SCSIRequest *sreq;
    size_t resp_size;
    enum SCSIXferMode mode;
    union {
        VirtIOSCSICmdResp     cmd;
        VirtIOSCSICtrlTMFResp tmf;
        VirtIOSCSICtrlANResp  an;
        VirtIOSCSIEvent       event;
    } resp;
    union {
        VirtIOSCSICmdReq      cmd;
        VirtIOSCSICtrlTMFReq  tmf;
        VirtIOSCSICtrlANReq   an;
    } req;
} VirtIOSCSIReq;
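/*
 * Decode the LUN from bytes 2-3 of the 8-byte virtio-scsi LUN field.
 * The encoding matches REPORT LUNS flat-space addressing; the mask keeps
 * the 14-bit LUN value and drops the address-method bits.
 */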
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

static void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}

static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}
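/*
 * Append the guest buffers described by iov/addr to the request's
 * scatter/gather list, skipping the first "skip" bytes (the virtio-scsi
 * request/response headers). Returns the number of bytes added.
 */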
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}

static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}
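/*
 * Pop the next element from vq into a VirtIOSCSIReq large enough for the
 * configured CDB size and initialize it; returns NULL if the queue is empty.
 */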
static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}

typedef struct {
    Notifier        notifier;
    VirtIOSCSIReq  *tmf_req;
} VirtIOSCSICancelNotifier;
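/*
 * Called once per cancelled request. The TMF response is completed only
 * when the last outstanding cancellation, tracked in tmf_req->remaining,
 * has finished.
 */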
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}

static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}
/* Return 0 if the request is ready to be completed and return to guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    BusChild *kid;
    int target;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        s->resetting++;
        device_cold_reset(&d->qdev);
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        s->resetting++;

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                device_cold_reset(&d1->qdev);
            }
        }
        rcu_read_unlock();

        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}
static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                                  sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                                  sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF)
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        virtio_scsi_handle_ctrl_req(s, req);
    }
}

/*
 * If dataplane is configured but not yet started, do so now and return true on
 * success.
 *
 * Dataplane is started by the core virtio code but virtqueue handler functions
 * can also be invoked when a guest kicks before DRIVER_OK, so this helper
 * function helps us deal with manually starting ioeventfd in that case.
 */
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
    if (!s->ctx || s->dataplane_started) {
        return false;
    }

    virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
    return !s->dataplane_fenced;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_ctrl_vq(s, vq);
    virtio_scsi_release(s);
}
static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}

static void virtio_scsi_command_failed(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.status = GOOD;
    switch (r->host_status) {
    case SCSI_HOST_NO_LUN:
        req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
        break;
    case SCSI_HOST_BUSY:
        req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        break;
    case SCSI_HOST_RESET:
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
        break;
    case SCSI_HOST_TRANSPORT_DISRUPTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
        break;
    case SCSI_HOST_TARGET_FAILURE:
        req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        break;
    case SCSI_HOST_RESERVATION_ERROR:
        req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
        break;
    case SCSI_HOST_ALLOCATION_FAILURE:
    case SCSI_HOST_MEDIUM_ERROR:
    case SCSI_HOST_ERROR:
    default:
        req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
        break;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = r->status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}
static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, size_t buf_len,
                                 void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (req->dev->resetting) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, vs->cdb_size, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    blk_io_plug(d->conf.blk);
    object_unref(OBJECT(d));
    return 0;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}
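/*
 * Two-stage submission: pop and prepare every available request with guest
 * notifications suppressed (each prepared request plugs its BlockBackend),
 * then submit the prepared requests in a second pass so the block layer can
 * batch them before the final unplug.
 */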
static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq))) {
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    blk_io_unplug(req->sreq->dev->conf.blk);
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
}

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_cmd_vq(s, vq);
    virtio_scsi_release(s);
}
static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        virtio_error(vdev,
                     "bad data written to virtio-scsi configuration space");
        return;
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* Firstly sync all virtio-scsi possible supported features */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);
    s->resetting++;
    bus_cold_reset(BUS(&s->bus));
    s->resetting--;

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}
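/*
 * Report an event on the event virtqueue. Nothing is sent before the driver
 * sets DRIVER_OK; if no buffer is available the event is dropped and the
 * next successful event carries VIRTIO_SCSI_T_EVENTS_MISSED.
 */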
static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                                   uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req);
}

static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    if (s->events_dropped) {
        virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
    }
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_event_vq(s, vq);
    virtio_scsi_release(s);
}
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                               sense.asc | (sense.ascq << 8));
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);
    sd->hba_supports_iothread = true;
}

static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *old_context;
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        old_context = blk_get_aio_context(sd->conf.blk);
        aio_context_acquire(old_context);
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        aio_context_release(old_context);
        if (ret < 0) {
            return;
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *ctx = s->ctx ?: qemu_get_aio_context();

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
        virtio_scsi_release(s);
    }

    aio_disable_external(ctx);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
    aio_enable_external(ctx);

    if (s->ctx) {
        virtio_scsi_acquire(s);
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
        virtio_scsi_release(s);
    }
}
static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .fail = virtio_scsi_command_failed,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};

void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
            s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}
static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
                        &virtio_scsi_scsi_info, vdev->bus_name);
    /* override default SCSI bus hotplug-handler, with virtio-scsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}

void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_delete_queue(vs->ctrl_vq);
    virtio_delete_queue(vs->event_vq);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_delete_queue(vs->cmd_vqs[i]);
    }
    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                       parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                     parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                       128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)