scsi: only access SCSIDevice->requests from one thread
[qemu/kevin.git] / hw / scsi / scsi-bus.c
blobb649cdf555bedf72dc9603779641024116f1f96b
1 #include "qemu/osdep.h"
2 #include "qapi/error.h"
3 #include "qemu/error-report.h"
4 #include "qemu/module.h"
5 #include "qemu/option.h"
6 #include "qemu/hw-version.h"
7 #include "hw/qdev-properties.h"
8 #include "hw/scsi/scsi.h"
9 #include "migration/qemu-file-types.h"
10 #include "migration/vmstate.h"
11 #include "scsi/constants.h"
12 #include "sysemu/block-backend.h"
13 #include "sysemu/blockdev.h"
14 #include "sysemu/sysemu.h"
15 #include "sysemu/runstate.h"
16 #include "trace.h"
17 #include "sysemu/dma.h"
18 #include "qemu/cutils.h"
20 static char *scsibus_get_dev_path(DeviceState *dev);
21 static char *scsibus_get_fw_dev_path(DeviceState *dev);
22 static void scsi_req_dequeue(SCSIRequest *req);
23 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
24 static void scsi_target_free_buf(SCSIRequest *req);
25 static void scsi_clear_reported_luns_changed(SCSIRequest *req);
27 static int next_scsi_bus;
29 static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
30 int channel, int id, int lun,
31 bool include_unrealized)
33 BusChild *kid;
34 SCSIDevice *retval = NULL;
36 QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
37 DeviceState *qdev = kid->child;
38 SCSIDevice *dev = SCSI_DEVICE(qdev);
40 if (dev->channel == channel && dev->id == id) {
41 if (dev->lun == lun) {
42 retval = dev;
43 break;
47 * If we don't find exact match (channel/bus/lun),
48 * we will return the first device which matches channel/bus
51 if (!retval) {
52 retval = dev;
58 * This function might run on the IO thread and we might race against
59 * main thread hot-plugging the device.
60 * We assume that as soon as .realized is set to true we can let
61 * the user access the device.
64 if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) {
65 retval = NULL;
68 return retval;
71 SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
73 RCU_READ_LOCK_GUARD();
74 return do_scsi_device_find(bus, channel, id, lun, false);
77 SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
79 SCSIDevice *d;
80 RCU_READ_LOCK_GUARD();
81 d = do_scsi_device_find(bus, channel, id, lun, false);
82 if (d) {
83 object_ref(d);
85 return d;
89 * Invoke @fn() for each enqueued request in device @s. Must be called from the
90 * main loop thread while the guest is stopped. This is only suitable for
91 * vmstate ->put(), use scsi_device_for_each_req_async() for other cases.
93 static void scsi_device_for_each_req_sync(SCSIDevice *s,
94 void (*fn)(SCSIRequest *, void *),
95 void *opaque)
97 SCSIRequest *req;
98 SCSIRequest *next_req;
100 assert(!runstate_is_running());
101 assert(qemu_in_main_thread());
103 QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
104 fn(req, opaque);
108 typedef struct {
109 SCSIDevice *s;
110 void (*fn)(SCSIRequest *, void *);
111 void *fn_opaque;
112 } SCSIDeviceForEachReqAsyncData;
114 static void scsi_device_for_each_req_async_bh(void *opaque)
116 g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
117 SCSIDevice *s = data->s;
118 AioContext *ctx;
119 SCSIRequest *req;
120 SCSIRequest *next;
123 * If the AioContext changed before this BH was called then reschedule into
124 * the new AioContext before accessing ->requests. This can happen when
125 * scsi_device_for_each_req_async() is called and then the AioContext is
126 * changed before BHs are run.
128 ctx = blk_get_aio_context(s->conf.blk);
129 if (ctx != qemu_get_current_aio_context()) {
130 aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh,
131 g_steal_pointer(&data));
132 return;
135 QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
136 data->fn(req, data->fn_opaque);
139 /* Drop the reference taken by scsi_device_for_each_req_async() */
140 object_unref(OBJECT(s));
144 * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
145 * runs in the AioContext that is executing the request.
147 static void scsi_device_for_each_req_async(SCSIDevice *s,
148 void (*fn)(SCSIRequest *, void *),
149 void *opaque)
151 assert(qemu_in_main_thread());
153 SCSIDeviceForEachReqAsyncData *data =
154 g_new(SCSIDeviceForEachReqAsyncData, 1);
156 data->s = s;
157 data->fn = fn;
158 data->fn_opaque = opaque;
161 * Hold a reference to the SCSIDevice until
162 * scsi_device_for_each_req_async_bh() finishes.
164 object_ref(OBJECT(s));
166 aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
167 scsi_device_for_each_req_async_bh,
168 data);
171 static void scsi_device_realize(SCSIDevice *s, Error **errp)
173 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
174 if (sc->realize) {
175 sc->realize(s, errp);
179 static void scsi_device_unrealize(SCSIDevice *s)
181 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
182 if (sc->unrealize) {
183 sc->unrealize(s);
187 int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
188 size_t buf_len, void *hba_private)
190 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
191 int rc;
193 assert(cmd->len == 0);
194 rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
195 if (bus->info->parse_cdb) {
196 rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);
198 return rc;
201 static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
202 uint8_t *buf, void *hba_private)
204 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
205 if (sc->alloc_req) {
206 return sc->alloc_req(s, tag, lun, buf, hba_private);
209 return NULL;
212 void scsi_device_unit_attention_reported(SCSIDevice *s)
214 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
215 if (sc->unit_attention_reported) {
216 sc->unit_attention_reported(s);
220 /* Create a scsi bus, and attach devices to it. */
221 void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
222 const SCSIBusInfo *info, const char *bus_name)
224 qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
225 bus->busnr = next_scsi_bus++;
226 bus->info = info;
227 qbus_set_bus_hotplug_handler(BUS(bus));
230 void scsi_req_retry(SCSIRequest *req)
232 req->retry = true;
235 /* Called in the AioContext that is executing the request */
236 static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
238 scsi_req_ref(req);
239 if (req->retry) {
240 req->retry = false;
241 switch (req->cmd.mode) {
242 case SCSI_XFER_FROM_DEV:
243 case SCSI_XFER_TO_DEV:
244 scsi_req_continue(req);
245 break;
246 case SCSI_XFER_NONE:
247 scsi_req_dequeue(req);
248 scsi_req_enqueue(req);
249 break;
252 scsi_req_unref(req);
255 static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
257 SCSIDevice *s = opaque;
259 assert(qemu_in_main_thread());
261 if (!running) {
262 return;
265 scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);
268 static bool scsi_bus_is_address_free(SCSIBus *bus,
269 int channel, int target, int lun,
270 SCSIDevice **p_dev)
272 SCSIDevice *d;
274 RCU_READ_LOCK_GUARD();
275 d = do_scsi_device_find(bus, channel, target, lun, true);
276 if (d && d->lun == lun) {
277 if (p_dev) {
278 *p_dev = d;
280 return false;
282 if (p_dev) {
283 *p_dev = NULL;
285 return true;
288 static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
290 SCSIDevice *dev = SCSI_DEVICE(qdev);
291 SCSIBus *bus = SCSI_BUS(qbus);
293 if (dev->channel > bus->info->max_channel) {
294 error_setg(errp, "bad scsi channel id: %d", dev->channel);
295 return false;
297 if (dev->id != -1 && dev->id > bus->info->max_target) {
298 error_setg(errp, "bad scsi device id: %d", dev->id);
299 return false;
301 if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
302 error_setg(errp, "bad scsi device lun: %d", dev->lun);
303 return false;
306 if (dev->id != -1 && dev->lun != -1) {
307 SCSIDevice *d;
308 if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
309 error_setg(errp, "lun already used by '%s'", d->qdev.id);
310 return false;
314 return true;
317 static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
319 SCSIDevice *dev = SCSI_DEVICE(qdev);
320 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
321 bool is_free;
322 Error *local_err = NULL;
324 if (dev->id == -1) {
325 int id = -1;
326 if (dev->lun == -1) {
327 dev->lun = 0;
329 do {
330 is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
331 } while (!is_free && id < bus->info->max_target);
332 if (!is_free) {
333 error_setg(errp, "no free target");
334 return;
336 dev->id = id;
337 } else if (dev->lun == -1) {
338 int lun = -1;
339 do {
340 is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
341 } while (!is_free && lun < bus->info->max_lun);
342 if (!is_free) {
343 error_setg(errp, "no free lun");
344 return;
346 dev->lun = lun;
349 QTAILQ_INIT(&dev->requests);
350 scsi_device_realize(dev, &local_err);
351 if (local_err) {
352 error_propagate(errp, local_err);
353 return;
355 dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
356 scsi_dma_restart_cb, dev);
359 static void scsi_qdev_unrealize(DeviceState *qdev)
361 SCSIDevice *dev = SCSI_DEVICE(qdev);
363 if (dev->vmsentry) {
364 qemu_del_vm_change_state_handler(dev->vmsentry);
367 scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
369 scsi_device_unrealize(dev);
371 blockdev_mark_auto_del(dev->conf.blk);
374 /* handle legacy '-drive if=scsi,...' cmd line args */
375 SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
376 int unit, bool removable, int bootindex,
377 bool share_rw,
378 BlockdevOnError rerror,
379 BlockdevOnError werror,
380 const char *serial, Error **errp)
382 const char *driver;
383 char *name;
384 DeviceState *dev;
385 DriveInfo *dinfo;
387 if (blk_is_sg(blk)) {
388 driver = "scsi-generic";
389 } else {
390 dinfo = blk_legacy_dinfo(blk);
391 if (dinfo && dinfo->media_cd) {
392 driver = "scsi-cd";
393 } else {
394 driver = "scsi-hd";
397 dev = qdev_new(driver);
398 name = g_strdup_printf("legacy[%d]", unit);
399 object_property_add_child(OBJECT(bus), name, OBJECT(dev));
400 g_free(name);
402 qdev_prop_set_uint32(dev, "scsi-id", unit);
403 if (bootindex >= 0) {
404 object_property_set_int(OBJECT(dev), "bootindex", bootindex,
405 &error_abort);
407 if (object_property_find(OBJECT(dev), "removable")) {
408 qdev_prop_set_bit(dev, "removable", removable);
410 if (serial && object_property_find(OBJECT(dev), "serial")) {
411 qdev_prop_set_string(dev, "serial", serial);
413 if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
414 object_unparent(OBJECT(dev));
415 return NULL;
417 if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) {
418 object_unparent(OBJECT(dev));
419 return NULL;
422 qdev_prop_set_enum(dev, "rerror", rerror);
423 qdev_prop_set_enum(dev, "werror", werror);
425 if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
426 object_unparent(OBJECT(dev));
427 return NULL;
429 return SCSI_DEVICE(dev);
432 void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
434 Location loc;
435 DriveInfo *dinfo;
436 int unit;
438 loc_push_none(&loc);
439 for (unit = 0; unit <= bus->info->max_target; unit++) {
440 dinfo = drive_get(IF_SCSI, bus->busnr, unit);
441 if (dinfo == NULL) {
442 continue;
444 qemu_opts_loc_restore(dinfo->opts);
445 scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
446 unit, false, -1, false,
447 BLOCKDEV_ON_ERROR_AUTO,
448 BLOCKDEV_ON_ERROR_AUTO,
449 NULL, &error_fatal);
451 loc_pop(&loc);
454 static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
456 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
457 scsi_req_complete(req, CHECK_CONDITION);
458 return 0;
461 static const struct SCSIReqOps reqops_invalid_field = {
462 .size = sizeof(SCSIRequest),
463 .send_command = scsi_invalid_field
466 /* SCSIReqOps implementation for invalid commands. */
468 static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
470 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
471 scsi_req_complete(req, CHECK_CONDITION);
472 return 0;
475 static const struct SCSIReqOps reqops_invalid_opcode = {
476 .size = sizeof(SCSIRequest),
477 .send_command = scsi_invalid_command
480 /* SCSIReqOps implementation for unit attention conditions. */
482 static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
484 SCSISense *ua = NULL;
486 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
487 ua = &req->dev->unit_attention;
488 } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
489 ua = &req->bus->unit_attention;
493 * Fetch the unit attention sense immediately so that another
494 * scsi_req_new does not use reqops_unit_attention.
496 if (ua) {
497 scsi_req_build_sense(req, *ua);
498 *ua = SENSE_CODE(NO_SENSE);
502 static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
504 scsi_req_complete(req, CHECK_CONDITION);
505 return 0;
508 static const struct SCSIReqOps reqops_unit_attention = {
509 .size = sizeof(SCSIRequest),
510 .init_req = scsi_fetch_unit_attention_sense,
511 .send_command = scsi_unit_attention
514 /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
515 an invalid LUN. */
517 typedef struct SCSITargetReq SCSITargetReq;
519 struct SCSITargetReq {
520 SCSIRequest req;
521 int len;
522 uint8_t *buf;
523 int buf_len;
/* Encode @lun into the first two bytes of a REPORT LUNS entry (SAM-5). */
static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        /* Simple logical unit addressing method*/
        outbuf[0] = 0;
        outbuf[1] = lun;
        return;
    }
    /* Flat space addressing method */
    outbuf[0] = 0x40 | (lun >> 8);
    outbuf[1] = (lun & 255);
}
539 static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
541 BusChild *kid;
542 int channel, id;
543 uint8_t tmp[8] = {0};
544 int len = 0;
545 GByteArray *buf;
547 if (r->req.cmd.xfer < 16) {
548 return false;
550 if (r->req.cmd.buf[2] > 2) {
551 return false;
554 /* reserve space for 63 LUNs*/
555 buf = g_byte_array_sized_new(512);
557 channel = r->req.dev->channel;
558 id = r->req.dev->id;
560 /* add size (will be updated later to correct value */
561 g_byte_array_append(buf, tmp, 8);
562 len += 8;
564 /* add LUN0 */
565 g_byte_array_append(buf, tmp, 8);
566 len += 8;
568 WITH_RCU_READ_LOCK_GUARD() {
569 QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
570 DeviceState *qdev = kid->child;
571 SCSIDevice *dev = SCSI_DEVICE(qdev);
573 if (dev->channel == channel && dev->id == id && dev->lun != 0 &&
574 qdev_is_realized(&dev->qdev)) {
575 store_lun(tmp, dev->lun);
576 g_byte_array_append(buf, tmp, 8);
577 len += 8;
582 r->buf_len = len;
583 r->buf = g_byte_array_free(buf, FALSE);
584 r->len = MIN(len, r->req.cmd.xfer & ~7);
586 /* store the LUN list length */
587 stl_be_p(&r->buf[0], len - 8);
590 * If a REPORT LUNS command enters the enabled command state, [...]
591 * the device server shall clear any pending unit attention condition
592 * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
594 scsi_clear_reported_luns_changed(&r->req);
596 return true;
599 static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
601 assert(r->req.dev->lun != r->req.lun);
603 scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
605 if (r->req.cmd.buf[1] & 0x2) {
606 /* Command support data - optional, not implemented */
607 return false;
610 if (r->req.cmd.buf[1] & 0x1) {
611 /* Vital product data */
612 uint8_t page_code = r->req.cmd.buf[2];
613 r->buf[r->len++] = page_code ; /* this page */
614 r->buf[r->len++] = 0x00;
616 switch (page_code) {
617 case 0x00: /* Supported page codes, mandatory */
619 int pages;
620 pages = r->len++;
621 r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
622 r->buf[pages] = r->len - pages - 1; /* number of pages */
623 break;
625 default:
626 return false;
628 /* done with EVPD */
629 assert(r->len < r->buf_len);
630 r->len = MIN(r->req.cmd.xfer, r->len);
631 return true;
634 /* Standard INQUIRY data */
635 if (r->req.cmd.buf[2] != 0) {
636 return false;
639 /* PAGE CODE == 0 */
640 r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
641 memset(r->buf, 0, r->len);
642 if (r->req.lun != 0) {
643 r->buf[0] = TYPE_NO_LUN;
644 } else {
645 r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
646 r->buf[2] = 5; /* Version */
647 r->buf[3] = 2 | 0x10; /* HiSup, response data format */
648 r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
649 r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
650 memcpy(&r->buf[8], "QEMU ", 8);
651 memcpy(&r->buf[16], "QEMU TARGET ", 16);
652 pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
654 return true;
657 static size_t scsi_sense_len(SCSIRequest *req)
659 if (req->dev->type == TYPE_SCANNER)
660 return SCSI_SENSE_LEN_SCANNER;
661 else
662 return SCSI_SENSE_LEN;
665 static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
667 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
668 int fixed_sense = (req->cmd.buf[1] & 1) == 0;
670 if (req->lun != 0 &&
671 buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
672 scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
673 scsi_req_complete(req, CHECK_CONDITION);
674 return 0;
676 switch (buf[0]) {
677 case REPORT_LUNS:
678 if (!scsi_target_emulate_report_luns(r)) {
679 goto illegal_request;
681 break;
682 case INQUIRY:
683 if (!scsi_target_emulate_inquiry(r)) {
684 goto illegal_request;
686 break;
687 case REQUEST_SENSE:
688 scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
689 if (req->lun != 0) {
690 const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
692 r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
693 sense, fixed_sense);
694 } else {
695 r->len = scsi_device_get_sense(r->req.dev, r->buf,
696 MIN(req->cmd.xfer, r->buf_len),
697 fixed_sense);
699 if (r->req.dev->sense_is_ua) {
700 scsi_device_unit_attention_reported(req->dev);
701 r->req.dev->sense_len = 0;
702 r->req.dev->sense_is_ua = false;
704 break;
705 case TEST_UNIT_READY:
706 break;
707 default:
708 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
709 scsi_req_complete(req, CHECK_CONDITION);
710 return 0;
711 illegal_request:
712 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
713 scsi_req_complete(req, CHECK_CONDITION);
714 return 0;
717 if (!r->len) {
718 scsi_req_complete(req, GOOD);
720 return r->len;
723 static void scsi_target_read_data(SCSIRequest *req)
725 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
726 uint32_t n;
728 n = r->len;
729 if (n > 0) {
730 r->len = 0;
731 scsi_req_data(&r->req, n);
732 } else {
733 scsi_req_complete(&r->req, GOOD);
737 static uint8_t *scsi_target_get_buf(SCSIRequest *req)
739 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
741 return r->buf;
744 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
746 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
748 r->buf = g_malloc(len);
749 r->buf_len = len;
751 return r->buf;
754 static void scsi_target_free_buf(SCSIRequest *req)
756 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
758 g_free(r->buf);
761 static const struct SCSIReqOps reqops_target_command = {
762 .size = sizeof(SCSITargetReq),
763 .send_command = scsi_target_send_command,
764 .read_data = scsi_target_read_data,
765 .get_buf = scsi_target_get_buf,
766 .free_req = scsi_target_free_buf,
770 SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
771 uint32_t tag, uint32_t lun, void *hba_private)
773 SCSIRequest *req;
774 SCSIBus *bus = scsi_bus_from_device(d);
775 BusState *qbus = BUS(bus);
776 const int memset_off = offsetof(SCSIRequest, sense)
777 + sizeof(req->sense);
779 req = g_malloc(reqops->size);
780 memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
781 req->refcount = 1;
782 req->bus = bus;
783 req->dev = d;
784 req->tag = tag;
785 req->lun = lun;
786 req->hba_private = hba_private;
787 req->status = -1;
788 req->host_status = -1;
789 req->ops = reqops;
790 object_ref(OBJECT(d));
791 object_ref(OBJECT(qbus->parent));
792 notifier_list_init(&req->cancel_notifiers);
794 if (reqops->init_req) {
795 reqops->init_req(req);
798 trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
799 return req;
802 SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
803 uint8_t *buf, size_t buf_len, void *hba_private)
805 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
806 const SCSIReqOps *ops;
807 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
808 SCSIRequest *req;
809 SCSICommand cmd = { .len = 0 };
810 int ret;
812 if (buf_len == 0) {
813 trace_scsi_req_parse_bad(d->id, lun, tag, 0);
814 goto invalid_opcode;
817 if ((d->unit_attention.key == UNIT_ATTENTION ||
818 bus->unit_attention.key == UNIT_ATTENTION) &&
819 (buf[0] != INQUIRY &&
820 buf[0] != REPORT_LUNS &&
821 buf[0] != GET_CONFIGURATION &&
822 buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
825 * If we already have a pending unit attention condition,
826 * report this one before triggering another one.
828 !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
829 ops = &reqops_unit_attention;
830 } else if (lun != d->lun ||
831 buf[0] == REPORT_LUNS ||
832 (buf[0] == REQUEST_SENSE && d->sense_len)) {
833 ops = &reqops_target_command;
834 } else {
835 ops = NULL;
838 if (ops != NULL || !sc->parse_cdb) {
839 ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
840 } else {
841 ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);
844 if (ret != 0) {
845 trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
846 invalid_opcode:
847 req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
848 } else {
849 assert(cmd.len != 0);
850 trace_scsi_req_parsed(d->id, lun, tag, buf[0],
851 cmd.mode, cmd.xfer);
852 if (cmd.lba != -1) {
853 trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
854 cmd.lba);
857 if (cmd.xfer > INT32_MAX) {
858 req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
859 } else if (ops) {
860 req = scsi_req_alloc(ops, d, tag, lun, hba_private);
861 } else {
862 req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
866 req->cmd = cmd;
867 req->residual = req->cmd.xfer;
869 switch (buf[0]) {
870 case INQUIRY:
871 trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
872 break;
873 case TEST_UNIT_READY:
874 trace_scsi_test_unit_ready(d->id, lun, tag);
875 break;
876 case REPORT_LUNS:
877 trace_scsi_report_luns(d->id, lun, tag);
878 break;
879 case REQUEST_SENSE:
880 trace_scsi_request_sense(d->id, lun, tag);
881 break;
882 default:
883 break;
886 return req;
889 uint8_t *scsi_req_get_buf(SCSIRequest *req)
891 return req->ops->get_buf(req);
894 static void scsi_clear_reported_luns_changed(SCSIRequest *req)
896 SCSISense *ua;
898 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
899 ua = &req->dev->unit_attention;
900 } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
901 ua = &req->bus->unit_attention;
902 } else {
903 return;
906 if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
907 ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
908 *ua = SENSE_CODE(NO_SENSE);
912 int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
914 int ret;
916 assert(len >= 14);
917 if (!req->sense_len) {
918 return 0;
921 ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
924 * FIXME: clearing unit attention conditions upon autosense should be done
925 * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
926 * (SAM-5, 5.14).
928 * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
929 * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
930 * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
932 if (req->dev->sense_is_ua) {
933 scsi_device_unit_attention_reported(req->dev);
934 req->dev->sense_len = 0;
935 req->dev->sense_is_ua = false;
937 return ret;
940 int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
942 return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
945 void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
947 trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
948 sense.key, sense.asc, sense.ascq);
949 req->sense_len = scsi_build_sense(req->sense, sense);
952 static void scsi_req_enqueue_internal(SCSIRequest *req)
954 assert(!req->enqueued);
955 scsi_req_ref(req);
956 if (req->bus->info->get_sg_list) {
957 req->sg = req->bus->info->get_sg_list(req);
958 } else {
959 req->sg = NULL;
961 req->enqueued = true;
962 QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
965 int32_t scsi_req_enqueue(SCSIRequest *req)
967 int32_t rc;
969 assert(!req->retry);
970 scsi_req_enqueue_internal(req);
971 scsi_req_ref(req);
972 rc = req->ops->send_command(req, req->cmd.buf);
973 scsi_req_unref(req);
974 return rc;
977 static void scsi_req_dequeue(SCSIRequest *req)
979 trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
980 req->retry = false;
981 if (req->enqueued) {
982 QTAILQ_REMOVE(&req->dev->requests, req, next);
983 req->enqueued = false;
984 scsi_req_unref(req);
/* Expected transfer length of an MMC GET PERFORMANCE response. */
static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7. */
    switch (type) {
    case 0:
        /* Table 295 (nominal performance) vs table 296 (exceptions),
         * selected by the low bits of the data type. */
        return ((data_type & 3) == 0 ? 16 : 6) * num_desc + 8;
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;   /* header only */
    }
}
1013 static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
1015 int byte_block = (buf[2] >> 2) & 0x1;
1016 int type = (buf[2] >> 4) & 0x1;
1017 int xfer_unit;
1019 if (byte_block) {
1020 if (type) {
1021 xfer_unit = dev->blocksize;
1022 } else {
1023 xfer_unit = 512;
1025 } else {
1026 xfer_unit = 1;
1029 return xfer_unit;
1032 static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
1034 int length = buf[2] & 0x3;
1035 int xfer;
1036 int unit = ata_passthrough_xfer_unit(dev, buf);
1038 switch (length) {
1039 case 0:
1040 case 3: /* USB-specific. */
1041 default:
1042 xfer = 0;
1043 break;
1044 case 1:
1045 xfer = buf[3];
1046 break;
1047 case 2:
1048 xfer = buf[4];
1049 break;
1052 return xfer * unit;
1055 static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
1057 int extend = buf[1] & 0x1;
1058 int length = buf[2] & 0x3;
1059 int xfer;
1060 int unit = ata_passthrough_xfer_unit(dev, buf);
1062 switch (length) {
1063 case 0:
1064 case 3: /* USB-specific. */
1065 default:
1066 xfer = 0;
1067 break;
1068 case 1:
1069 xfer = buf[4];
1070 xfer |= (extend ? buf[3] << 8 : 0);
1071 break;
1072 case 2:
1073 xfer = buf[6];
1074 xfer |= (extend ? buf[5] << 8 : 0);
1075 break;
1078 return xfer * unit;
1081 static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1083 cmd->xfer = scsi_cdb_xfer(buf);
1084 switch (buf[0]) {
1085 case TEST_UNIT_READY:
1086 case REWIND:
1087 case START_STOP:
1088 case SET_CAPACITY:
1089 case WRITE_FILEMARKS:
1090 case WRITE_FILEMARKS_16:
1091 case SPACE:
1092 case RESERVE:
1093 case RELEASE:
1094 case ERASE:
1095 case ALLOW_MEDIUM_REMOVAL:
1096 case SEEK_10:
1097 case SYNCHRONIZE_CACHE:
1098 case SYNCHRONIZE_CACHE_16:
1099 case LOCATE_16:
1100 case LOCK_UNLOCK_CACHE:
1101 case SET_CD_SPEED:
1102 case SET_LIMITS:
1103 case WRITE_LONG_10:
1104 case UPDATE_BLOCK:
1105 case RESERVE_TRACK:
1106 case SET_READ_AHEAD:
1107 case PRE_FETCH:
1108 case PRE_FETCH_16:
1109 case ALLOW_OVERWRITE:
1110 cmd->xfer = 0;
1111 break;
1112 case VERIFY_10:
1113 case VERIFY_12:
1114 case VERIFY_16:
1115 if ((buf[1] & 2) == 0) {
1116 cmd->xfer = 0;
1117 } else if ((buf[1] & 4) != 0) {
1118 cmd->xfer = 1;
1120 cmd->xfer *= dev->blocksize;
1121 break;
1122 case MODE_SENSE:
1123 break;
1124 case WRITE_SAME_10:
1125 case WRITE_SAME_16:
1126 cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
1127 break;
1128 case READ_CAPACITY_10:
1129 cmd->xfer = 8;
1130 break;
1131 case READ_BLOCK_LIMITS:
1132 cmd->xfer = 6;
1133 break;
1134 case SEND_VOLUME_TAG:
1135 /* GPCMD_SET_STREAMING from multimedia commands. */
1136 if (dev->type == TYPE_ROM) {
1137 cmd->xfer = buf[10] | (buf[9] << 8);
1138 } else {
1139 cmd->xfer = buf[9] | (buf[8] << 8);
1141 break;
1142 case WRITE_6:
1143 /* length 0 means 256 blocks */
1144 if (cmd->xfer == 0) {
1145 cmd->xfer = 256;
1147 /* fall through */
1148 case WRITE_10:
1149 case WRITE_VERIFY_10:
1150 case WRITE_12:
1151 case WRITE_VERIFY_12:
1152 case WRITE_16:
1153 case WRITE_VERIFY_16:
1154 cmd->xfer *= dev->blocksize;
1155 break;
1156 case READ_6:
1157 case READ_REVERSE:
1158 /* length 0 means 256 blocks */
1159 if (cmd->xfer == 0) {
1160 cmd->xfer = 256;
1162 /* fall through */
1163 case READ_10:
1164 case READ_12:
1165 case READ_16:
1166 cmd->xfer *= dev->blocksize;
1167 break;
1168 case FORMAT_UNIT:
1169 /* MMC mandates the parameter list to be 12-bytes long. Parameters
1170 * for block devices are restricted to the header right now. */
1171 if (dev->type == TYPE_ROM && (buf[1] & 16)) {
1172 cmd->xfer = 12;
1173 } else {
1174 cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
1176 break;
1177 case INQUIRY:
1178 case RECEIVE_DIAGNOSTIC:
1179 case SEND_DIAGNOSTIC:
1180 cmd->xfer = buf[4] | (buf[3] << 8);
1181 break;
1182 case READ_CD:
1183 case READ_BUFFER:
1184 case WRITE_BUFFER:
1185 case SEND_CUE_SHEET:
1186 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1187 break;
1188 case PERSISTENT_RESERVE_OUT:
1189 cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
1190 break;
1191 case ERASE_12:
1192 if (dev->type == TYPE_ROM) {
1193 /* MMC command GET PERFORMANCE. */
1194 cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
1195 buf[10], buf[1] & 0x1f);
1197 break;
1198 case MECHANISM_STATUS:
1199 case READ_DVD_STRUCTURE:
1200 case SEND_DVD_STRUCTURE:
1201 case MAINTENANCE_OUT:
1202 case MAINTENANCE_IN:
1203 if (dev->type == TYPE_ROM) {
1204 /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
1205 cmd->xfer = buf[9] | (buf[8] << 8);
1207 break;
1208 case ATA_PASSTHROUGH_12:
1209 if (dev->type == TYPE_ROM) {
1210 /* BLANK command of MMC */
1211 cmd->xfer = 0;
1212 } else {
1213 cmd->xfer = ata_passthrough_12_xfer(dev, buf);
1215 break;
1216 case ATA_PASSTHROUGH_16:
1217 cmd->xfer = ata_passthrough_16_xfer(dev, buf);
1218 break;
1220 return 0;
1223 static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1225 switch (buf[0]) {
1226 /* stream commands */
1227 case ERASE_12:
1228 case ERASE_16:
1229 cmd->xfer = 0;
1230 break;
1231 case READ_6:
1232 case READ_REVERSE:
1233 case RECOVER_BUFFERED_DATA:
1234 case WRITE_6:
1235 cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
1236 if (buf[1] & 0x01) { /* fixed */
1237 cmd->xfer *= dev->blocksize;
1239 break;
1240 case READ_16:
1241 case READ_REVERSE_16:
1242 case VERIFY_16:
1243 case WRITE_16:
1244 cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
1245 if (buf[1] & 0x01) { /* fixed */
1246 cmd->xfer *= dev->blocksize;
1248 break;
1249 case REWIND:
1250 case LOAD_UNLOAD:
1251 cmd->xfer = 0;
1252 break;
1253 case SPACE_16:
1254 cmd->xfer = buf[13] | (buf[12] << 8);
1255 break;
1256 case READ_POSITION:
1257 switch (buf[1] & 0x1f) /* operation code */ {
1258 case SHORT_FORM_BLOCK_ID:
1259 case SHORT_FORM_VENDOR_SPECIFIC:
1260 cmd->xfer = 20;
1261 break;
1262 case LONG_FORM:
1263 cmd->xfer = 32;
1264 break;
1265 case EXTENDED_FORM:
1266 cmd->xfer = buf[8] | (buf[7] << 8);
1267 break;
1268 default:
1269 return -1;
1272 break;
1273 case FORMAT_UNIT:
1274 cmd->xfer = buf[4] | (buf[3] << 8);
1275 break;
1276 /* generic commands */
1277 default:
1278 return scsi_req_xfer(cmd, dev, buf);
1280 return 0;
/*
 * Compute the transfer length, in bytes, for a medium changer CDB.
 *
 * Fills cmd->xfer and returns 0.  Opcodes not specific to medium
 * changers fall through to the generic scsi_req_xfer().
 */
static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        /* pure movement commands carry no data phase */
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        /* 24-bit big-endian allocation length in CDB bytes 7..9 */
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}
/*
 * Compute the transfer length, in bytes, for a scanner device CDB.
 *
 * Fills cmd->xfer and returns 0.  Opcodes not specific to scanners
 * fall through to the generic scsi_req_xfer().
 */
static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        /* single-byte transfer length in CDB byte 4 */
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        /* 24-bit big-endian transfer length in CDB bytes 6..8 */
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}
/*
 * Classify the data-transfer direction of @cmd from its opcode.
 *
 * A zero transfer length always means SCSI_XFER_NONE.  Otherwise every
 * opcode that moves data toward the device is listed explicitly and
 * everything else defaults to SCSI_XFER_FROM_DEV.
 */
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}
1393 int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
1394 size_t buf_len)
1396 int rc;
1397 int len;
1399 cmd->lba = -1;
1400 len = scsi_cdb_length(buf);
1401 if (len < 0 || len > buf_len) {
1402 return -1;
1405 cmd->len = len;
1406 switch (dev->type) {
1407 case TYPE_TAPE:
1408 rc = scsi_req_stream_xfer(cmd, dev, buf);
1409 break;
1410 case TYPE_MEDIUM_CHANGER:
1411 rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
1412 break;
1413 case TYPE_SCANNER:
1414 rc = scsi_req_scanner_length(cmd, dev, buf);
1415 break;
1416 default:
1417 rc = scsi_req_xfer(cmd, dev, buf);
1418 break;
1421 if (rc != 0)
1422 return rc;
1424 memcpy(cmd->buf, buf, cmd->len);
1425 scsi_cmd_xfer_mode(cmd);
1426 cmd->lba = scsi_cmd_lba(cmd);
1427 return 0;
1430 void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
1432 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
1434 scsi_device_set_ua(dev, sense);
1435 if (bus->info->change) {
1436 bus->info->change(bus, dev, sense);
1440 SCSIRequest *scsi_req_ref(SCSIRequest *req)
1442 assert(req->refcount > 0);
1443 req->refcount++;
1444 return req;
1447 void scsi_req_unref(SCSIRequest *req)
1449 assert(req->refcount > 0);
1450 if (--req->refcount == 0) {
1451 BusState *qbus = req->dev->qdev.parent_bus;
1452 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
1454 if (bus->info->free_request && req->hba_private) {
1455 bus->info->free_request(bus, req->hba_private);
1457 if (req->ops->free_req) {
1458 req->ops->free_req(req);
1460 object_unref(OBJECT(req->dev));
1461 object_unref(OBJECT(qbus->parent));
1462 g_free(req);
1466 /* Tell the device that we finished processing this chunk of I/O. It
1467 will start the next chunk or complete the command. */
1468 void scsi_req_continue(SCSIRequest *req)
1470 if (req->io_canceled) {
1471 trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
1472 return;
1474 trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
1475 if (req->cmd.mode == SCSI_XFER_TO_DEV) {
1476 req->ops->write_data(req);
1477 } else {
1478 req->ops->read_data(req);
/* Called by the devices when data is ready for the HBA. The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O. */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        /* No scatter/gather list from the HBA: account for this chunk
         * and hand it off; the HBA calls scsi_req_continue() itself
         * once it has moved the data. */
        req->residual -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    /* dma_buf_read/write update req->residual with what was not mapped */
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        dma_buf_read(buf, len, &req->residual, req->sg,
                     MEMTXATTRS_UNSPECIFIED);
    } else {
        dma_buf_write(buf, len, &req->residual, req->sg,
                      MEMTXATTRS_UNSPECIFIED);
    }
    scsi_req_continue(req);
}
1517 void scsi_req_print(SCSIRequest *req)
1519 FILE *fp = stderr;
1520 int i;
1522 fprintf(fp, "[%s id=%d] %s",
1523 req->dev->qdev.parent_bus->name,
1524 req->dev->id,
1525 scsi_command_name(req->cmd.buf[0]));
1526 for (i = 1; i < req->cmd.len; i++) {
1527 fprintf(fp, " 0x%02x", req->cmd.buf[i]);
1529 switch (req->cmd.mode) {
1530 case SCSI_XFER_NONE:
1531 fprintf(fp, " - none\n");
1532 break;
1533 case SCSI_XFER_FROM_DEV:
1534 fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
1535 break;
1536 case SCSI_XFER_TO_DEV:
1537 fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
1538 break;
1539 default:
1540 fprintf(fp, " - Oops\n");
1541 break;
1545 void scsi_req_complete_failed(SCSIRequest *req, int host_status)
1547 SCSISense sense;
1548 int status;
1550 assert(req->status == -1 && req->host_status == -1);
1551 assert(req->ops != &reqops_unit_attention);
1553 if (!req->bus->info->fail) {
1554 status = scsi_sense_from_host_status(req->host_status, &sense);
1555 if (status == CHECK_CONDITION) {
1556 scsi_req_build_sense(req, sense);
1558 scsi_req_complete(req, status);
1559 return;
1562 req->host_status = host_status;
1563 scsi_req_ref(req);
1564 scsi_req_dequeue(req);
1565 req->bus->info->fail(req);
1567 /* Cancelled requests might end up being completed instead of cancelled */
1568 notifier_list_notify(&req->cancel_notifiers, req);
1569 scsi_req_unref(req);
/*
 * Complete @req with SCSI status @status (host status SCSI_HOST_OK).
 *
 * Latches the request's sense data into the device (so a later REQUEST
 * SENSE can fetch it), then hands the result to the HBA's complete
 * callback and drops the queue reference.
 */
void scsi_req_complete(SCSIRequest *req, int status)
{
    /* a request may only be completed once */
    assert(req->status == -1 && req->host_status == -1);
    req->status = status;
    req->host_status = SCSI_HOST_OK;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        /* remember whether this sense reports a unit attention */
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /* keep the request alive across the HBA callback */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->residual);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}
1601 /* Called by the devices when the request is canceled. */
1602 void scsi_req_cancel_complete(SCSIRequest *req)
1604 assert(req->io_canceled);
1605 if (req->bus->info->cancel) {
1606 req->bus->info->cancel(req);
1608 notifier_list_notify(&req->cancel_notifiers, req);
1609 scsi_req_unref(req);
/* Cancel @req asynchronously. @notifier is added to @req's cancellation
 * notifier list, the bus will be notified the requests cancellation is
 * completed.
 * */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added. Just wait for that.
         */
        assert(req->aiocb);
        return;
    }

    /* Dropped in scsi_req_cancel_complete. */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    /* If AIO is in flight, cancel it asynchronously; otherwise the
     * cancellation can complete immediately. */
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}
/*
 * Cancel @req synchronously.  Unlike scsi_req_cancel_async(), a request
 * that is not enqueued is silently ignored, and in-flight AIO is
 * cancelled with the blocking blk_aio_cancel().
 */
void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete. */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}
/*
 * Rank a unit attention condition: lower values take precedence over
 * higher ones when deciding whether a new UA may replace a pending one
 * (see scsi_bus_set_ua/scsi_device_set_ua).  Non-UA sense ranks lowest.
 */
static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others".
         * (Branch deliberately empty: falls through to the generic
         * asc/ascq-based precedence at the end of the function.) */
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
        return 8;
    }
    /* everything else: rank by the raw asc/ascq pair */
    return (sense.asc << 8) | sense.ascq;
}
1688 void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
1690 int prec1, prec2;
1691 if (sense.key != UNIT_ATTENTION) {
1692 return;
1696 * Override a pre-existing unit attention condition, except for a more
1697 * important reset condition.
1699 prec1 = scsi_ua_precedence(bus->unit_attention);
1700 prec2 = scsi_ua_precedence(sense);
1701 if (prec2 < prec1) {
1702 bus->unit_attention = sense;
1706 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
1708 int prec1, prec2;
1709 if (sense.key != UNIT_ATTENTION) {
1710 return;
1712 trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
1713 sense.asc, sense.ascq);
1716 * Override a pre-existing unit attention condition, except for a more
1717 * important reset condition.
1719 prec1 = scsi_ua_precedence(sdev->unit_attention);
1720 prec2 = scsi_ua_precedence(sense);
1721 if (prec2 < prec1) {
1722 sdev->unit_attention = sense;
/* Per-request callback for scsi_device_purge_requests(): cancel the
 * request without registering a completion notifier. */
static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
{
    scsi_req_cancel_async(req, NULL);
}
/*
 * Cancel every in-flight request of @sdev, wait for the cancellations
 * to drain, then record @sense as a unit attention condition.
 */
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);

    /* NOTE(review): blk_drain() is run under the BlockBackend's
     * AioContext lock so the cancellations started above can complete
     * before the UA is recorded — confirm against the AioContext
     * locking rules of this QEMU version. */
    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}
1741 void scsi_device_drained_begin(SCSIDevice *sdev)
1743 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
1744 if (!bus) {
1745 return;
1748 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1749 assert(bus->drain_count < INT_MAX);
1752 * Multiple BlockBackends can be on a SCSIBus and each may begin/end
1753 * draining at any time. Keep a counter so HBAs only see begin/end once.
1755 if (bus->drain_count++ == 0) {
1756 trace_scsi_bus_drained_begin(bus, sdev);
1757 if (bus->info->drained_begin) {
1758 bus->info->drained_begin(bus);
1763 void scsi_device_drained_end(SCSIDevice *sdev)
1765 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
1766 if (!bus) {
1767 return;
1770 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1771 assert(bus->drain_count > 0);
1773 if (bus->drain_count-- == 1) {
1774 trace_scsi_bus_drained_end(bus, sdev);
1775 if (bus->info->drained_end) {
1776 bus->info->drained_end(bus);
1781 static char *scsibus_get_dev_path(DeviceState *dev)
1783 SCSIDevice *d = SCSI_DEVICE(dev);
1784 DeviceState *hba = dev->parent_bus->parent;
1785 char *id;
1786 char *path;
1788 id = qdev_get_dev_path(hba);
1789 if (id) {
1790 path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
1791 } else {
1792 path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
1794 g_free(id);
1795 return path;
/* Build the firmware (OpenFirmware-style) device path:
 * "channel@<channel>/<fw-name>@<id>,<lun>".  Caller frees. */
static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}
/* SCSI request list.  For simplicity, pv points to the whole device */

/*
 * Serialize one in-flight request into the migration stream.
 *
 * The leading sbyte doubles as the "another request follows" marker and
 * the retry flag (1 = retry, 2 = no retry); get_scsi_requests stops at
 * the trailing 0 written by put_scsi_requests.
 */
static void put_scsi_req(SCSIRequest *req, void *opaque)
{
    QEMUFile *f = opaque;

    /* only quiesced, not-yet-completed, enqueued requests migrate */
    assert(!req->io_canceled);
    assert(req->status == -1 && req->host_status == -1);
    assert(req->enqueued);

    qemu_put_sbyte(f, req->retry ? 1 : 2);
    qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
    qemu_put_be32s(f, &req->tag);
    qemu_put_be32s(f, &req->lun);
    /* HBA state first, then device state: the load side reads in the
     * same order (see get_scsi_requests) */
    if (req->bus->info->save_request) {
        req->bus->info->save_request(f, req);
    }
    if (req->ops->save_request) {
        req->ops->save_request(f, req);
    }
}
/*
 * VMState .put handler: serialize all pending requests of the device
 * (synchronously — the VM is stopped for migration) and terminate the
 * list with a 0 byte.
 */
static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    SCSIDevice *s = pv;

    scsi_device_for_each_req_sync(s, put_scsi_req, f);
    qemu_put_sbyte(f, 0);
    return 0;
}
/*
 * VMState .get handler: rebuild the device's pending request list from
 * the stream written by put_scsi_requests.  Each entry starts with a
 * positive sbyte (1 = retry, 2 = no retry); a 0 byte ends the list.
 */
static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        /*
         * A too-short CDB would have been rejected by scsi_req_new, so just use
         * SCSI_CMD_BUF_SIZE as the CDB length.
         */
        req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
        req->retry = (sbyte == 1);
        /* HBA state first, then device state — mirrors put_scsi_req */
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later. */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}
/* Custom VMState marshaller for the variable-length list of in-flight
 * requests (see put_scsi_requests / get_scsi_requests above). */
static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};
/* Subsection predicate: migrate the extra sense bytes only when the
 * current sense data does not fit the legacy buffer size. */
static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}
/* Optional subsection carrying the sense bytes beyond the legacy
 * buffer size; only sent when scsi_sense_state_needed() is true, which
 * keeps migration compatible with older QEMUs. */
static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};
/* Migration state common to every SCSI device: the pending unit
 * attention, the cached sense data (legacy-sized portion; the rest goes
 * in the "SCSIDevice/sense" subsection), and the in-flight request
 * list via the custom vmstate_info_scsi_requests marshaller. */
const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            /* hand-rolled field: pv is the whole SCSIDevice, hence the
             * zero size/offset */
            .name = "requests",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_scsi_requests,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};
/* qdev properties shared by every SCSI device: the bus address.
 * NOTE(review): -1 for scsi-id/lun appears to mean "auto-assign at
 * realize time" — confirm against scsi_qdev_realize. */
static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};
/* Class init for the abstract SCSI device base class: storage
 * category, SCSI bus attachment, realize/unrealize hooks and the
 * common address properties. */
static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    device_class_set_props(k, scsi_props);
}
1951 static void scsi_dev_instance_init(Object *obj)
1953 DeviceState *dev = DEVICE(obj);
1954 SCSIDevice *s = SCSI_DEVICE(dev);
1956 device_add_bootindex_property(obj, &s->conf.bootindex,
1957 "bootindex", NULL,
1958 &s->qdev);
/* Abstract base type all concrete SCSI devices derive from. */
static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};
/* Class init for the SCSI bus type: device-path construction, address
 * checking, and simple (synchronous) hot-unplug handling. */
static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    k->check_address = scsi_bus_check_address;
    hc->unplug = qdev_simple_device_unplug_cb;
}
/* The SCSI bus type; acts as its own hotplug handler (see
 * scsi_bus_class_init). */
static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
/* Register the SCSI bus and device base types with the QOM type
 * system at startup. */
static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)