/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini      <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "sysemu/block-backend.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"
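
/*
 * virtio-scsi addresses devices with an 8-byte LUN field: byte 0 is always 1,
 * byte 1 is the target ID, and bytes 2-3 carry the flat-space LUN (with the
 * 0x40 "flat addressing" bit set in byte 2 for LUNs >= 256).
 */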
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}
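
/*
 * Set up the bookkeeping fields of a request freshly popped from a virtqueue;
 * everything in the structure that follows resp_iov is simply zeroed.
 */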
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}
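
/*
 * Copy the response header into the guest's in-buffers, push the element back
 * onto the virtqueue and notify the guest (via irqfd when dataplane is
 * running), then release the request.
 */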
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}
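
/*
 * Append the guest buffers described by iov/addr to the request's DMA
 * scatter/gather list, skipping the first 'skip' bytes (the virtio-scsi
 * header).  Returns the number of bytes added.
 */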
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}
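
/*
 * Split the descriptor chain of a request into the fixed-size virtio-scsi
 * headers (copied into req->req / mapped by req->resp_iov) and the data
 * payload (added to req->qsgl).  Returns -EINVAL for malformed chains and
 * -ENOTSUP for bidirectional transfers.
 */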
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}

static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}
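
/*
 * Task management functions may have to cancel several in-flight requests
 * asynchronously.  Each cancellation registers one of these notifiers; the
 * TMF response is sent once req->remaining drops to zero.
 */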
typedef struct {
    Notifier        notifier;
    VirtIOSCSIReq  *tmf_req;
} VirtIOSCSICancelNotifier;

static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}

static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}

/* Return 0 if the request is ready to be completed and return to guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    BusChild *kid;
    int target;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        s->resetting++;
        qdev_reset_all(&d->qdev);
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        s->resetting++;

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                qdev_reset_all(&d1->qdev);
            }
        }
        rcu_read_unlock();

        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}
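
/*
 * Dispatch a single control-queue request: either a task management function
 * or an asynchronous notification query/subscribe.  Requests that complete
 * synchronously are answered here; TMFs that return -EINPROGRESS are
 * completed later by the cancellation notifier.
 */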
static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                    sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                    sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF)
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;
    bool progress = false;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        progress = true;
        virtio_scsi_handle_ctrl_req(s, req);
    }
    return progress;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_ctrl_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}
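
/*
 * Completion callback invoked by the SCSI layer.  For non-GOOD status the
 * sense data is copied into the guest buffer right after the response header
 * (it is not part of req->resp itself).
 */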
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
                                         size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (req->dev->resetting) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}
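
/*
 * First stage of command submission: parse the request, look up the target
 * device and create the SCSIRequest.  Returns 0 on success; on failure the
 * request has already been completed (or the device marked broken for
 * -EINVAL).
 */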
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    blk_io_plug(d->conf.blk);
    object_unref(OBJECT(d));
    return 0;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}
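
/*
 * Drain the command virtqueue with two-stage submission: requests are first
 * prepared (plugging their BlockBackend) and collected on a local list, then
 * submitted to the SCSI layer once the virtqueue has been drained.
 */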
bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);
    bool progress = false;

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq))) {
            progress = true;
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    blk_io_unplug(req->sreq->dev->conf.blk);
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
    return progress;
}

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_cmd_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        virtio_error(vdev,
                     "bad data written to virtio-scsi configuration space");
        return;
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* Firstly sync all virtio-scsi possible supported features */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);
    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}
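
/*
 * Queue an event on the event virtqueue.  If the guest has not posted any
 * buffer, remember the fact in events_dropped so that an EVENTS_MISSED
 * notification can be raised later.
 */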
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                            uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req);
}

bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    if (s->events_dropped) {
        virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
        return true;
    }
    return false;
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_event_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                               sense.asc | (sense.ascq << 8));
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);
    sd->hba_supports_iothread = true;
}
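
/*
 * Hotplug handler: when dataplane is in use, move the new device's
 * BlockBackend to the iothread's AioContext, then tell the guest to rescan
 * the bus via a TRANSPORT_RESET event (if hotplug events were negotiated).
 */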
static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *old_context;
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        old_context = blk_get_aio_context(sd->conf.blk);
        aio_context_acquire(old_context);
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        aio_context_release(old_context);
        if (ret < 0) {
            return;
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *ctx = s->ctx ?: qemu_get_aio_context();

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
        virtio_scsi_release(s);
    }

    aio_disable_external(ctx);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
    aio_enable_external(ctx);

    if (s->ctx) {
        virtio_scsi_acquire(s);
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
        virtio_scsi_release(s);
    }
}

static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};
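
/*
 * Create the virtqueues: two fixed queues (control and event) followed by
 * conf.num_queues command queues, all sized by the virtqueue_size property.
 */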
void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI,
                sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
            s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}

static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_new(&s->bus, sizeof(s->bus), dev,
                 &virtio_scsi_scsi_info, vdev->bus_name);
    /* override default SCSI bus hotplug-handler, with virtio-scsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}

void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_delete_queue(vs->ctrl_vq);
    virtio_delete_queue(vs->event_vq);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_delete_queue(vs->cmd_vqs[i]);
    }
    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                       parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                     parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                       128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)