/* hw/scsi/scsi-bus.c: QEMU SCSI bus and request handling */
1 #include "qemu/osdep.h"
2 #include "qapi/error.h"
3 #include "qemu/error-report.h"
4 #include "qemu/module.h"
5 #include "qemu/option.h"
6 #include "hw/qdev-properties.h"
7 #include "hw/scsi/scsi.h"
8 #include "migration/qemu-file-types.h"
9 #include "migration/vmstate.h"
10 #include "scsi/constants.h"
11 #include "sysemu/block-backend.h"
12 #include "sysemu/blockdev.h"
13 #include "sysemu/sysemu.h"
14 #include "sysemu/runstate.h"
15 #include "trace.h"
16 #include "sysemu/dma.h"
17 #include "qemu/cutils.h"
19 static char *scsibus_get_dev_path(DeviceState *dev);
20 static char *scsibus_get_fw_dev_path(DeviceState *dev);
21 static void scsi_req_dequeue(SCSIRequest *req);
22 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
23 static void scsi_target_free_buf(SCSIRequest *req);
25 static Property scsi_props[] = {
26 DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
27 DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
28 DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
29 DEFINE_PROP_END_OF_LIST(),
32 static void scsi_bus_class_init(ObjectClass *klass, void *data)
34 BusClass *k = BUS_CLASS(klass);
35 HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
37 k->get_dev_path = scsibus_get_dev_path;
38 k->get_fw_dev_path = scsibus_get_fw_dev_path;
39 hc->unplug = qdev_simple_device_unplug_cb;
42 static const TypeInfo scsi_bus_info = {
43 .name = TYPE_SCSI_BUS,
44 .parent = TYPE_BUS,
45 .instance_size = sizeof(SCSIBus),
46 .class_init = scsi_bus_class_init,
47 .interfaces = (InterfaceInfo[]) {
48 { TYPE_HOTPLUG_HANDLER },
49 { }
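/* Bus number handed out to each newly created SCSI bus; it is what
 * -drive if=scsi,bus=N is matched against. */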
52 static int next_scsi_bus;
54 static void scsi_device_realize(SCSIDevice *s, Error **errp)
56 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
57 if (sc->realize) {
58 sc->realize(s, errp);
62 static void scsi_device_unrealize(SCSIDevice *s)
64 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
65 if (sc->unrealize) {
66 sc->unrealize(s);
70 int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
71 void *hba_private)
73 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
74 int rc;
76 assert(cmd->len == 0);
77 rc = scsi_req_parse_cdb(dev, cmd, buf);
78 if (bus->info->parse_cdb) {
79 rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
81 return rc;
84 static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
85 uint8_t *buf, void *hba_private)
87 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
88 if (sc->alloc_req) {
89 return sc->alloc_req(s, tag, lun, buf, hba_private);
92 return NULL;
95 void scsi_device_unit_attention_reported(SCSIDevice *s)
97 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
98 if (sc->unit_attention_reported) {
99 sc->unit_attention_reported(s);
103 /* Create a SCSI bus and attach devices to it. */
104 void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
105 const SCSIBusInfo *info, const char *bus_name)
107 qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
108 bus->busnr = next_scsi_bus++;
109 bus->info = info;
110 qbus_set_bus_hotplug_handler(BUS(bus));
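/*
 * Bottom half scheduled when the VM starts running again: walk the device's
 * request list and restart every request flagged for retry, either by
 * continuing its data transfer or by re-submitting the command.
 */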
113 static void scsi_dma_restart_bh(void *opaque)
115 SCSIDevice *s = opaque;
116 SCSIRequest *req, *next;
118 qemu_bh_delete(s->bh);
119 s->bh = NULL;
121 aio_context_acquire(blk_get_aio_context(s->conf.blk));
122 QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
123 scsi_req_ref(req);
124 if (req->retry) {
125 req->retry = false;
126 switch (req->cmd.mode) {
127 case SCSI_XFER_FROM_DEV:
128 case SCSI_XFER_TO_DEV:
129 scsi_req_continue(req);
130 break;
131 case SCSI_XFER_NONE:
132 scsi_req_dequeue(req);
133 scsi_req_enqueue(req);
134 break;
137 scsi_req_unref(req);
139 aio_context_release(blk_get_aio_context(s->conf.blk));
142 void scsi_req_retry(SCSIRequest *req)
144 /* No need to save a reference, because scsi_dma_restart_bh just
145 * looks at the request list. */
146 req->retry = true;
149 static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
151 SCSIDevice *s = opaque;
153 if (!running) {
154 return;
156 if (!s->bh) {
157 AioContext *ctx = blk_get_aio_context(s->conf.blk);
158 s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
159 qemu_bh_schedule(s->bh);
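/*
 * Realize a SCSI device: validate channel/id/lun against the bus limits and,
 * if id or lun was left at -1, pick the first free target or LUN on the bus.
 */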
163 static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
165 SCSIDevice *dev = SCSI_DEVICE(qdev);
166 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
167 SCSIDevice *d;
168 Error *local_err = NULL;
170 if (dev->channel > bus->info->max_channel) {
171 error_setg(errp, "bad scsi channel id: %d", dev->channel);
172 return;
174 if (dev->id != -1 && dev->id > bus->info->max_target) {
175 error_setg(errp, "bad scsi device id: %d", dev->id);
176 return;
178 if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
179 error_setg(errp, "bad scsi device lun: %d", dev->lun);
180 return;
183 if (dev->id == -1) {
184 int id = -1;
185 if (dev->lun == -1) {
186 dev->lun = 0;
188 do {
189 d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
190 } while (d && d->lun == dev->lun && id < bus->info->max_target);
191 if (d && d->lun == dev->lun) {
192 error_setg(errp, "no free target");
193 return;
195 dev->id = id;
196 } else if (dev->lun == -1) {
197 int lun = -1;
198 do {
199 d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
200 } while (d && d->lun == lun && lun < bus->info->max_lun);
201 if (d && d->lun == lun) {
202 error_setg(errp, "no free lun");
203 return;
205 dev->lun = lun;
206 } else {
207 d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
208 assert(d);
209 if (d->lun == dev->lun && dev != d) {
210 error_setg(errp, "lun already used by '%s'", d->qdev.id);
211 return;
215 QTAILQ_INIT(&dev->requests);
216 scsi_device_realize(dev, &local_err);
217 if (local_err) {
218 error_propagate(errp, local_err);
219 return;
221 dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
222 scsi_dma_restart_cb, dev);
225 static void scsi_qdev_unrealize(DeviceState *qdev)
227 SCSIDevice *dev = SCSI_DEVICE(qdev);
229 if (dev->vmsentry) {
230 qemu_del_vm_change_state_handler(dev->vmsentry);
233 scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
235 scsi_device_unrealize(dev);
237 blockdev_mark_auto_del(dev->conf.blk);
240 /* handle legacy '-drive if=scsi,...' cmd line args */
241 SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
242 int unit, bool removable, int bootindex,
243 bool share_rw,
244 BlockdevOnError rerror,
245 BlockdevOnError werror,
246 const char *serial, Error **errp)
248 const char *driver;
249 char *name;
250 DeviceState *dev;
251 DriveInfo *dinfo;
253 if (blk_is_sg(blk)) {
254 driver = "scsi-generic";
255 } else {
256 dinfo = blk_legacy_dinfo(blk);
257 if (dinfo && dinfo->media_cd) {
258 driver = "scsi-cd";
259 } else {
260 driver = "scsi-hd";
263 dev = qdev_new(driver);
264 name = g_strdup_printf("legacy[%d]", unit);
265 object_property_add_child(OBJECT(bus), name, OBJECT(dev));
266 g_free(name);
268 qdev_prop_set_uint32(dev, "scsi-id", unit);
269 if (bootindex >= 0) {
270 object_property_set_int(OBJECT(dev), "bootindex", bootindex,
271 &error_abort);
273 if (object_property_find(OBJECT(dev), "removable")) {
274 qdev_prop_set_bit(dev, "removable", removable);
276 if (serial && object_property_find(OBJECT(dev), "serial")) {
277 qdev_prop_set_string(dev, "serial", serial);
279 if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
280 object_unparent(OBJECT(dev));
281 return NULL;
283 if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) {
284 object_unparent(OBJECT(dev));
285 return NULL;
288 qdev_prop_set_enum(dev, "rerror", rerror);
289 qdev_prop_set_enum(dev, "werror", werror);
291 if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
292 object_unparent(OBJECT(dev));
293 return NULL;
295 return SCSI_DEVICE(dev);
298 void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
300 Location loc;
301 DriveInfo *dinfo;
302 int unit;
304 loc_push_none(&loc);
305 for (unit = 0; unit <= bus->info->max_target; unit++) {
306 dinfo = drive_get(IF_SCSI, bus->busnr, unit);
307 if (dinfo == NULL) {
308 continue;
310 qemu_opts_loc_restore(dinfo->opts);
311 scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
312 unit, false, -1, false,
313 BLOCKDEV_ON_ERROR_AUTO,
314 BLOCKDEV_ON_ERROR_AUTO,
315 NULL, &error_fatal);
317 loc_pop(&loc);
320 static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
322 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
323 scsi_req_complete(req, CHECK_CONDITION);
324 return 0;
327 static const struct SCSIReqOps reqops_invalid_field = {
328 .size = sizeof(SCSIRequest),
329 .send_command = scsi_invalid_field
332 /* SCSIReqOps implementation for invalid commands. */
334 static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
336 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
337 scsi_req_complete(req, CHECK_CONDITION);
338 return 0;
341 static const struct SCSIReqOps reqops_invalid_opcode = {
342 .size = sizeof(SCSIRequest),
343 .send_command = scsi_invalid_command
346 /* SCSIReqOps implementation for unit attention conditions. */
348 static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
350 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
351 scsi_req_build_sense(req, req->dev->unit_attention);
352 } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
353 scsi_req_build_sense(req, req->bus->unit_attention);
355 scsi_req_complete(req, CHECK_CONDITION);
356 return 0;
359 static const struct SCSIReqOps reqops_unit_attention = {
360 .size = sizeof(SCSIRequest),
361 .send_command = scsi_unit_attention
364 /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
365 an invalid LUN. */
367 typedef struct SCSITargetReq SCSITargetReq;
369 struct SCSITargetReq {
370 SCSIRequest req;
371 int len;
372 uint8_t *buf;
373 int buf_len;
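/*
 * Encode a LUN in the 8-byte format returned by REPORT LUNS: peripheral
 * device addressing for LUNs below 256, flat space addressing (0x40 set in
 * the first byte) for larger LUNs.
 */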
376 static void store_lun(uint8_t *outbuf, int lun)
378 if (lun < 256) {
379 outbuf[1] = lun;
380 return;
382 outbuf[1] = (lun & 255);
383 outbuf[0] = (lun >> 8) | 0x40;
386 static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
388 BusChild *kid;
389 int i, len, n;
390 int channel, id;
391 bool found_lun0;
393 if (r->req.cmd.xfer < 16) {
394 return false;
396 if (r->req.cmd.buf[2] > 2) {
397 return false;
399 channel = r->req.dev->channel;
400 id = r->req.dev->id;
401 found_lun0 = false;
402 n = 0;
403 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
404 DeviceState *qdev = kid->child;
405 SCSIDevice *dev = SCSI_DEVICE(qdev);
407 if (dev->channel == channel && dev->id == id) {
408 if (dev->lun == 0) {
409 found_lun0 = true;
411 n += 8;
414 if (!found_lun0) {
415 n += 8;
418 scsi_target_alloc_buf(&r->req, n + 8);
420 len = MIN(n + 8, r->req.cmd.xfer & ~7);
421 memset(r->buf, 0, len);
422 stl_be_p(&r->buf[0], n);
423 i = found_lun0 ? 8 : 16;
424 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
425 DeviceState *qdev = kid->child;
426 SCSIDevice *dev = SCSI_DEVICE(qdev);
428 if (dev->channel == channel && dev->id == id) {
429 store_lun(&r->buf[i], dev->lun);
430 i += 8;
433 assert(i == n + 8);
434 r->len = len;
435 return true;
438 static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
440 assert(r->req.dev->lun != r->req.lun);
442 scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
444 if (r->req.cmd.buf[1] & 0x2) {
445 /* Command support data - optional, not implemented */
446 return false;
449 if (r->req.cmd.buf[1] & 0x1) {
450 /* Vital product data */
451 uint8_t page_code = r->req.cmd.buf[2];
452 r->buf[r->len++] = page_code; /* this page */
453 r->buf[r->len++] = 0x00;
455 switch (page_code) {
456 case 0x00: /* Supported page codes, mandatory */
458 int pages;
459 pages = r->len++;
460 r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
461 r->buf[pages] = r->len - pages - 1; /* number of pages */
462 break;
464 default:
465 return false;
467 /* done with EVPD */
468 assert(r->len < r->buf_len);
469 r->len = MIN(r->req.cmd.xfer, r->len);
470 return true;
473 /* Standard INQUIRY data */
474 if (r->req.cmd.buf[2] != 0) {
475 return false;
478 /* PAGE CODE == 0 */
479 r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
480 memset(r->buf, 0, r->len);
481 if (r->req.lun != 0) {
482 r->buf[0] = TYPE_NO_LUN;
483 } else {
484 r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
485 r->buf[2] = 5; /* Version */
486 r->buf[3] = 2 | 0x10; /* HiSup, response data format */
487 r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
488 r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
489 memcpy(&r->buf[8], "QEMU ", 8);
490 memcpy(&r->buf[16], "QEMU TARGET ", 16);
491 pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
493 return true;
496 static size_t scsi_sense_len(SCSIRequest *req)
498 if (req->dev->type == TYPE_SCANNER)
499 return SCSI_SENSE_LEN_SCANNER;
500 else
501 return SCSI_SENSE_LEN;
504 static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
506 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
507 int fixed_sense = (req->cmd.buf[1] & 1) == 0;
509 if (req->lun != 0 &&
510 buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
511 scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
512 scsi_req_complete(req, CHECK_CONDITION);
513 return 0;
515 switch (buf[0]) {
516 case REPORT_LUNS:
517 if (!scsi_target_emulate_report_luns(r)) {
518 goto illegal_request;
520 break;
521 case INQUIRY:
522 if (!scsi_target_emulate_inquiry(r)) {
523 goto illegal_request;
525 break;
526 case REQUEST_SENSE:
527 scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
528 if (req->lun != 0) {
529 const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
531 r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
532 sense, fixed_sense);
533 } else {
534 r->len = scsi_device_get_sense(r->req.dev, r->buf,
535 MIN(req->cmd.xfer, r->buf_len),
536 fixed_sense);
538 if (r->req.dev->sense_is_ua) {
539 scsi_device_unit_attention_reported(req->dev);
540 r->req.dev->sense_len = 0;
541 r->req.dev->sense_is_ua = false;
543 break;
544 case TEST_UNIT_READY:
545 break;
546 default:
547 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
548 scsi_req_complete(req, CHECK_CONDITION);
549 return 0;
550 illegal_request:
551 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
552 scsi_req_complete(req, CHECK_CONDITION);
553 return 0;
556 if (!r->len) {
557 scsi_req_complete(req, GOOD);
559 return r->len;
562 static void scsi_target_read_data(SCSIRequest *req)
564 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
565 uint32_t n;
567 n = r->len;
568 if (n > 0) {
569 r->len = 0;
570 scsi_req_data(&r->req, n);
571 } else {
572 scsi_req_complete(&r->req, GOOD);
576 static uint8_t *scsi_target_get_buf(SCSIRequest *req)
578 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
580 return r->buf;
583 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
585 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
587 r->buf = g_malloc(len);
588 r->buf_len = len;
590 return r->buf;
593 static void scsi_target_free_buf(SCSIRequest *req)
595 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
597 g_free(r->buf);
600 static const struct SCSIReqOps reqops_target_command = {
601 .size = sizeof(SCSITargetReq),
602 .send_command = scsi_target_send_command,
603 .read_data = scsi_target_read_data,
604 .get_buf = scsi_target_get_buf,
605 .free_req = scsi_target_free_buf,
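/*
 * Allocate a SCSIRequest with the given ops.  Everything past the sense
 * buffer is zeroed, the refcount starts at one, and references are taken on
 * both the device and its HBA so they stay alive as long as the request.
 */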
609 SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
610 uint32_t tag, uint32_t lun, void *hba_private)
612 SCSIRequest *req;
613 SCSIBus *bus = scsi_bus_from_device(d);
614 BusState *qbus = BUS(bus);
615 const int memset_off = offsetof(SCSIRequest, sense)
616 + sizeof(req->sense);
618 req = g_malloc(reqops->size);
619 memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
620 req->refcount = 1;
621 req->bus = bus;
622 req->dev = d;
623 req->tag = tag;
624 req->lun = lun;
625 req->hba_private = hba_private;
626 req->status = -1;
627 req->ops = reqops;
628 object_ref(OBJECT(d));
629 object_ref(OBJECT(qbus->parent));
630 notifier_list_init(&req->cancel_notifiers);
631 trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
632 return req;
635 SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
636 uint8_t *buf, void *hba_private)
638 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
639 const SCSIReqOps *ops;
640 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
641 SCSIRequest *req;
642 SCSICommand cmd = { .len = 0 };
643 int ret;
645 if ((d->unit_attention.key == UNIT_ATTENTION ||
646 bus->unit_attention.key == UNIT_ATTENTION) &&
647 (buf[0] != INQUIRY &&
648 buf[0] != REPORT_LUNS &&
649 buf[0] != GET_CONFIGURATION &&
650 buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
653 * If we already have a pending unit attention condition,
654 * report this one before triggering another one.
656 !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
657 ops = &reqops_unit_attention;
658 } else if (lun != d->lun ||
659 buf[0] == REPORT_LUNS ||
660 (buf[0] == REQUEST_SENSE && d->sense_len)) {
661 ops = &reqops_target_command;
662 } else {
663 ops = NULL;
666 if (ops != NULL || !sc->parse_cdb) {
667 ret = scsi_req_parse_cdb(d, &cmd, buf);
668 } else {
669 ret = sc->parse_cdb(d, &cmd, buf, hba_private);
672 if (ret != 0) {
673 trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
674 req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
675 } else {
676 assert(cmd.len != 0);
677 trace_scsi_req_parsed(d->id, lun, tag, buf[0],
678 cmd.mode, cmd.xfer);
679 if (cmd.lba != -1) {
680 trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
681 cmd.lba);
684 if (cmd.xfer > INT32_MAX) {
685 req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
686 } else if (ops) {
687 req = scsi_req_alloc(ops, d, tag, lun, hba_private);
688 } else {
689 req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
693 req->cmd = cmd;
694 req->resid = req->cmd.xfer;
696 switch (buf[0]) {
697 case INQUIRY:
698 trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
699 break;
700 case TEST_UNIT_READY:
701 trace_scsi_test_unit_ready(d->id, lun, tag);
702 break;
703 case REPORT_LUNS:
704 trace_scsi_report_luns(d->id, lun, tag);
705 break;
706 case REQUEST_SENSE:
707 trace_scsi_request_sense(d->id, lun, tag);
708 break;
709 default:
710 break;
713 return req;
716 uint8_t *scsi_req_get_buf(SCSIRequest *req)
718 return req->ops->get_buf(req);
721 static void scsi_clear_unit_attention(SCSIRequest *req)
723 SCSISense *ua;
724 if (req->dev->unit_attention.key != UNIT_ATTENTION &&
725 req->bus->unit_attention.key != UNIT_ATTENTION) {
726 return;
730 * If an INQUIRY command enters the enabled command state,
731 * the device server shall [not] clear any unit attention condition;
732 * See also MMC-6, paragraphs 6.5 and 6.6.2.
734 if (req->cmd.buf[0] == INQUIRY ||
735 req->cmd.buf[0] == GET_CONFIGURATION ||
736 req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
737 return;
740 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
741 ua = &req->dev->unit_attention;
742 } else {
743 ua = &req->bus->unit_attention;
747 * If a REPORT LUNS command enters the enabled command state, [...]
748 * the device server shall clear any pending unit attention condition
749 * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
751 if (req->cmd.buf[0] == REPORT_LUNS &&
752 !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
753 ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
754 return;
757 *ua = SENSE_CODE(NO_SENSE);
760 int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
762 int ret;
764 assert(len >= 14);
765 if (!req->sense_len) {
766 return 0;
769 ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
772 * FIXME: clearing unit attention conditions upon autosense should be done
773 * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
774 * (SAM-5, 5.14).
776 * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
777 * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
778 * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
780 if (req->dev->sense_is_ua) {
781 scsi_device_unit_attention_reported(req->dev);
782 req->dev->sense_len = 0;
783 req->dev->sense_is_ua = false;
785 return ret;
788 int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
790 return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
793 void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
795 trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
796 sense.key, sense.asc, sense.ascq);
797 req->sense_len = scsi_build_sense(req->sense, sense);
800 static void scsi_req_enqueue_internal(SCSIRequest *req)
802 assert(!req->enqueued);
803 scsi_req_ref(req);
804 if (req->bus->info->get_sg_list) {
805 req->sg = req->bus->info->get_sg_list(req);
806 } else {
807 req->sg = NULL;
809 req->enqueued = true;
810 QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
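/*
 * Rough request lifecycle as seen from an HBA (a sketch, not a contract):
 * the HBA calls scsi_req_new() and scsi_req_enqueue(); the device hands data
 * chunks to the HBA's transfer_data callback via scsi_req_data(); the HBA
 * acknowledges each chunk with scsi_req_continue(); and the device finishes
 * with scsi_req_complete(), which invokes the HBA's complete callback.
 */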
813 int32_t scsi_req_enqueue(SCSIRequest *req)
815 int32_t rc;
817 assert(!req->retry);
818 scsi_req_enqueue_internal(req);
819 scsi_req_ref(req);
820 rc = req->ops->send_command(req, req->cmd.buf);
821 scsi_req_unref(req);
822 return rc;
825 static void scsi_req_dequeue(SCSIRequest *req)
827 trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
828 req->retry = false;
829 if (req->enqueued) {
830 QTAILQ_REMOVE(&req->dev->requests, req, next);
831 req->enqueued = false;
832 scsi_req_unref(req);
836 static int scsi_get_performance_length(int num_desc, int type, int data_type)
838 /* MMC-6, paragraph 6.7. */
839 switch (type) {
840 case 0:
841 if ((data_type & 3) == 0) {
842 /* Each descriptor is as in Table 295 - Nominal performance. */
843 return 16 * num_desc + 8;
844 } else {
845 /* Each descriptor is as in Table 296 - Exceptions. */
846 return 6 * num_desc + 8;
848 case 1:
849 case 4:
850 case 5:
851 return 8 * num_desc + 8;
852 case 2:
853 return 2048 * num_desc + 8;
854 case 3:
855 return 16 * num_desc + 8;
856 default:
857 return 8;
861 static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
863 int byte_block = (buf[2] >> 2) & 0x1;
864 int type = (buf[2] >> 4) & 0x1;
865 int xfer_unit;
867 if (byte_block) {
868 if (type) {
869 xfer_unit = dev->blocksize;
870 } else {
871 xfer_unit = 512;
873 } else {
874 xfer_unit = 1;
877 return xfer_unit;
880 static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
882 int length = buf[2] & 0x3;
883 int xfer;
884 int unit = ata_passthrough_xfer_unit(dev, buf);
886 switch (length) {
887 case 0:
888 case 3: /* USB-specific. */
889 default:
890 xfer = 0;
891 break;
892 case 1:
893 xfer = buf[3];
894 break;
895 case 2:
896 xfer = buf[4];
897 break;
900 return xfer * unit;
903 static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
905 int extend = buf[1] & 0x1;
906 int length = buf[2] & 0x3;
907 int xfer;
908 int unit = ata_passthrough_xfer_unit(dev, buf);
910 switch (length) {
911 case 0:
912 case 3: /* USB-specific. */
913 default:
914 xfer = 0;
915 break;
916 case 1:
917 xfer = buf[4];
918 xfer |= (extend ? buf[3] << 8 : 0);
919 break;
920 case 2:
921 xfer = buf[6];
922 xfer |= (extend ? buf[5] << 8 : 0);
923 break;
926 return xfer * unit;
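/*
 * Compute the expected data transfer length for a block-device (or generic)
 * CDB.  Commands that move no data force cmd->xfer to 0; block read/write
 * commands scale the CDB length field by the device block size.
 */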
929 static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
931 cmd->xfer = scsi_cdb_xfer(buf);
932 switch (buf[0]) {
933 case TEST_UNIT_READY:
934 case REWIND:
935 case START_STOP:
936 case SET_CAPACITY:
937 case WRITE_FILEMARKS:
938 case WRITE_FILEMARKS_16:
939 case SPACE:
940 case RESERVE:
941 case RELEASE:
942 case ERASE:
943 case ALLOW_MEDIUM_REMOVAL:
944 case SEEK_10:
945 case SYNCHRONIZE_CACHE:
946 case SYNCHRONIZE_CACHE_16:
947 case LOCATE_16:
948 case LOCK_UNLOCK_CACHE:
949 case SET_CD_SPEED:
950 case SET_LIMITS:
951 case WRITE_LONG_10:
952 case UPDATE_BLOCK:
953 case RESERVE_TRACK:
954 case SET_READ_AHEAD:
955 case PRE_FETCH:
956 case PRE_FETCH_16:
957 case ALLOW_OVERWRITE:
958 cmd->xfer = 0;
959 break;
960 case VERIFY_10:
961 case VERIFY_12:
962 case VERIFY_16:
963 if ((buf[1] & 2) == 0) {
964 cmd->xfer = 0;
965 } else if ((buf[1] & 4) != 0) {
966 cmd->xfer = 1;
968 cmd->xfer *= dev->blocksize;
969 break;
970 case MODE_SENSE:
971 break;
972 case WRITE_SAME_10:
973 case WRITE_SAME_16:
974 cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
975 break;
976 case READ_CAPACITY_10:
977 cmd->xfer = 8;
978 break;
979 case READ_BLOCK_LIMITS:
980 cmd->xfer = 6;
981 break;
982 case SEND_VOLUME_TAG:
983 /* GPCMD_SET_STREAMING from multimedia commands. */
984 if (dev->type == TYPE_ROM) {
985 cmd->xfer = buf[10] | (buf[9] << 8);
986 } else {
987 cmd->xfer = buf[9] | (buf[8] << 8);
989 break;
990 case WRITE_6:
991 /* length 0 means 256 blocks */
992 if (cmd->xfer == 0) {
993 cmd->xfer = 256;
995 /* fall through */
996 case WRITE_10:
997 case WRITE_VERIFY_10:
998 case WRITE_12:
999 case WRITE_VERIFY_12:
1000 case WRITE_16:
1001 case WRITE_VERIFY_16:
1002 cmd->xfer *= dev->blocksize;
1003 break;
1004 case READ_6:
1005 case READ_REVERSE:
1006 /* length 0 means 256 blocks */
1007 if (cmd->xfer == 0) {
1008 cmd->xfer = 256;
1010 /* fall through */
1011 case READ_10:
1012 case READ_12:
1013 case READ_16:
1014 cmd->xfer *= dev->blocksize;
1015 break;
1016 case FORMAT_UNIT:
1017 /* MMC mandates the parameter list to be 12 bytes long. Parameters
1018 * for block devices are restricted to the header right now. */
1019 if (dev->type == TYPE_ROM && (buf[1] & 16)) {
1020 cmd->xfer = 12;
1021 } else {
1022 cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
1024 break;
1025 case INQUIRY:
1026 case RECEIVE_DIAGNOSTIC:
1027 case SEND_DIAGNOSTIC:
1028 cmd->xfer = buf[4] | (buf[3] << 8);
1029 break;
1030 case READ_CD:
1031 case READ_BUFFER:
1032 case WRITE_BUFFER:
1033 case SEND_CUE_SHEET:
1034 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1035 break;
1036 case PERSISTENT_RESERVE_OUT:
1037 cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
1038 break;
1039 case ERASE_12:
1040 if (dev->type == TYPE_ROM) {
1041 /* MMC command GET PERFORMANCE. */
1042 cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
1043 buf[10], buf[1] & 0x1f);
1045 break;
1046 case MECHANISM_STATUS:
1047 case READ_DVD_STRUCTURE:
1048 case SEND_DVD_STRUCTURE:
1049 case MAINTENANCE_OUT:
1050 case MAINTENANCE_IN:
1051 if (dev->type == TYPE_ROM) {
1052 /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
1053 cmd->xfer = buf[9] | (buf[8] << 8);
1055 break;
1056 case ATA_PASSTHROUGH_12:
1057 if (dev->type == TYPE_ROM) {
1058 /* BLANK command of MMC */
1059 cmd->xfer = 0;
1060 } else {
1061 cmd->xfer = ata_passthrough_12_xfer(dev, buf);
1063 break;
1064 case ATA_PASSTHROUGH_16:
1065 cmd->xfer = ata_passthrough_16_xfer(dev, buf);
1066 break;
1068 return 0;
1071 static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1073 switch (buf[0]) {
1074 /* stream commands */
1075 case ERASE_12:
1076 case ERASE_16:
1077 cmd->xfer = 0;
1078 break;
1079 case READ_6:
1080 case READ_REVERSE:
1081 case RECOVER_BUFFERED_DATA:
1082 case WRITE_6:
1083 cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
1084 if (buf[1] & 0x01) { /* fixed */
1085 cmd->xfer *= dev->blocksize;
1087 break;
1088 case READ_16:
1089 case READ_REVERSE_16:
1090 case VERIFY_16:
1091 case WRITE_16:
1092 cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
1093 if (buf[1] & 0x01) { /* fixed */
1094 cmd->xfer *= dev->blocksize;
1096 break;
1097 case REWIND:
1098 case LOAD_UNLOAD:
1099 cmd->xfer = 0;
1100 break;
1101 case SPACE_16:
1102 cmd->xfer = buf[13] | (buf[12] << 8);
1103 break;
1104 case READ_POSITION:
1105 switch (buf[1] & 0x1f) /* operation code */ {
1106 case SHORT_FORM_BLOCK_ID:
1107 case SHORT_FORM_VENDOR_SPECIFIC:
1108 cmd->xfer = 20;
1109 break;
1110 case LONG_FORM:
1111 cmd->xfer = 32;
1112 break;
1113 case EXTENDED_FORM:
1114 cmd->xfer = buf[8] | (buf[7] << 8);
1115 break;
1116 default:
1117 return -1;
1120 break;
1121 case FORMAT_UNIT:
1122 cmd->xfer = buf[4] | (buf[3] << 8);
1123 break;
1124 /* generic commands */
1125 default:
1126 return scsi_req_xfer(cmd, dev, buf);
1128 return 0;
1131 static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1133 switch (buf[0]) {
1134 /* medium changer commands */
1135 case EXCHANGE_MEDIUM:
1136 case INITIALIZE_ELEMENT_STATUS:
1137 case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
1138 case MOVE_MEDIUM:
1139 case POSITION_TO_ELEMENT:
1140 cmd->xfer = 0;
1141 break;
1142 case READ_ELEMENT_STATUS:
1143 cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
1144 break;
1146 /* generic commands */
1147 default:
1148 return scsi_req_xfer(cmd, dev, buf);
1150 return 0;
1153 static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1155 switch (buf[0]) {
1156 /* Scanner commands */
1157 case OBJECT_POSITION:
1158 cmd->xfer = 0;
1159 break;
1160 case SCAN:
1161 cmd->xfer = buf[4];
1162 break;
1163 case READ_10:
1164 case SEND:
1165 case GET_WINDOW:
1166 case SET_WINDOW:
1167 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1168 break;
1169 default:
1170 /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
1171 return scsi_req_xfer(cmd, dev, buf);
1174 return 0;
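/*
 * Decide the transfer direction: no data when xfer is 0, "to device" for the
 * write-style opcodes listed below (and per the T_DIR bit for ATA
 * passthrough), otherwise "from device".
 */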
1177 static void scsi_cmd_xfer_mode(SCSICommand *cmd)
1179 if (!cmd->xfer) {
1180 cmd->mode = SCSI_XFER_NONE;
1181 return;
1183 switch (cmd->buf[0]) {
1184 case WRITE_6:
1185 case WRITE_10:
1186 case WRITE_VERIFY_10:
1187 case WRITE_12:
1188 case WRITE_VERIFY_12:
1189 case WRITE_16:
1190 case WRITE_VERIFY_16:
1191 case VERIFY_10:
1192 case VERIFY_12:
1193 case VERIFY_16:
1194 case COPY:
1195 case COPY_VERIFY:
1196 case COMPARE:
1197 case CHANGE_DEFINITION:
1198 case LOG_SELECT:
1199 case MODE_SELECT:
1200 case MODE_SELECT_10:
1201 case SEND_DIAGNOSTIC:
1202 case WRITE_BUFFER:
1203 case FORMAT_UNIT:
1204 case REASSIGN_BLOCKS:
1205 case SEARCH_EQUAL:
1206 case SEARCH_HIGH:
1207 case SEARCH_LOW:
1208 case UPDATE_BLOCK:
1209 case WRITE_LONG_10:
1210 case WRITE_SAME_10:
1211 case WRITE_SAME_16:
1212 case UNMAP:
1213 case SEARCH_HIGH_12:
1214 case SEARCH_EQUAL_12:
1215 case SEARCH_LOW_12:
1216 case MEDIUM_SCAN:
1217 case SEND_VOLUME_TAG:
1218 case SEND_CUE_SHEET:
1219 case SEND_DVD_STRUCTURE:
1220 case PERSISTENT_RESERVE_OUT:
1221 case MAINTENANCE_OUT:
1222 case SET_WINDOW:
1223 case SCAN:
1224 /* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for
1225 * non-scanner devices, so we only get here for SCAN and not for START_STOP.
1227 cmd->mode = SCSI_XFER_TO_DEV;
1228 break;
1229 case ATA_PASSTHROUGH_12:
1230 case ATA_PASSTHROUGH_16:
1231 /* T_DIR */
1232 cmd->mode = (cmd->buf[2] & 0x8) ?
1233 SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
1234 break;
1235 default:
1236 cmd->mode = SCSI_XFER_FROM_DEV;
1237 break;
1241 int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
1243 int rc;
1244 int len;
1246 cmd->lba = -1;
1247 len = scsi_cdb_length(buf);
1248 if (len < 0) {
1249 return -1;
1252 cmd->len = len;
1253 switch (dev->type) {
1254 case TYPE_TAPE:
1255 rc = scsi_req_stream_xfer(cmd, dev, buf);
1256 break;
1257 case TYPE_MEDIUM_CHANGER:
1258 rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
1259 break;
1260 case TYPE_SCANNER:
1261 rc = scsi_req_scanner_length(cmd, dev, buf);
1262 break;
1263 default:
1264 rc = scsi_req_xfer(cmd, dev, buf);
1265 break;
1268 if (rc != 0)
1269 return rc;
1271 memcpy(cmd->buf, buf, cmd->len);
1272 scsi_cmd_xfer_mode(cmd);
1273 cmd->lba = scsi_cmd_lba(cmd);
1274 return 0;
1277 void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
1279 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
1281 scsi_device_set_ua(dev, sense);
1282 if (bus->info->change) {
1283 bus->info->change(bus, dev, sense);
1287 SCSIRequest *scsi_req_ref(SCSIRequest *req)
1289 assert(req->refcount > 0);
1290 req->refcount++;
1291 return req;
1294 void scsi_req_unref(SCSIRequest *req)
1296 assert(req->refcount > 0);
1297 if (--req->refcount == 0) {
1298 BusState *qbus = req->dev->qdev.parent_bus;
1299 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
1301 if (bus->info->free_request && req->hba_private) {
1302 bus->info->free_request(bus, req->hba_private);
1304 if (req->ops->free_req) {
1305 req->ops->free_req(req);
1307 object_unref(OBJECT(req->dev));
1308 object_unref(OBJECT(qbus->parent));
1309 g_free(req);
1313 /* Tell the device that we finished processing this chunk of I/O. It
1314 will start the next chunk or complete the command. */
1315 void scsi_req_continue(SCSIRequest *req)
1317 if (req->io_canceled) {
1318 trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
1319 return;
1321 trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
1322 if (req->cmd.mode == SCSI_XFER_TO_DEV) {
1323 req->ops->write_data(req);
1324 } else {
1325 req->ops->read_data(req);
1329 /* Called by the devices when data is ready for the HBA. The HBA should
1330 start a DMA operation to read or fill the device's data buffer.
1331 Once it completes, calling scsi_req_continue will restart I/O. */
1332 void scsi_req_data(SCSIRequest *req, int len)
1334 uint8_t *buf;
1335 if (req->io_canceled) {
1336 trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
1337 return;
1339 trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
1340 assert(req->cmd.mode != SCSI_XFER_NONE);
1341 if (!req->sg) {
1342 req->resid -= len;
1343 req->bus->info->transfer_data(req, len);
1344 return;
1347 /* If the device calls scsi_req_data and the HBA specified a
1348 * scatter/gather list, the transfer has to happen in a single
1349 * step. */
1350 assert(!req->dma_started);
1351 req->dma_started = true;
1353 buf = scsi_req_get_buf(req);
1354 if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
1355 req->resid = dma_buf_read(buf, len, req->sg);
1356 } else {
1357 req->resid = dma_buf_write(buf, len, req->sg);
1359 scsi_req_continue(req);
1362 void scsi_req_print(SCSIRequest *req)
1364 FILE *fp = stderr;
1365 int i;
1367 fprintf(fp, "[%s id=%d] %s",
1368 req->dev->qdev.parent_bus->name,
1369 req->dev->id,
1370 scsi_command_name(req->cmd.buf[0]));
1371 for (i = 1; i < req->cmd.len; i++) {
1372 fprintf(fp, " 0x%02x", req->cmd.buf[i]);
1374 switch (req->cmd.mode) {
1375 case SCSI_XFER_NONE:
1376 fprintf(fp, " - none\n");
1377 break;
1378 case SCSI_XFER_FROM_DEV:
1379 fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
1380 break;
1381 case SCSI_XFER_TO_DEV:
1382 fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
1383 break;
1384 default:
1385 fprintf(fp, " - Oops\n");
1386 break;
1390 void scsi_req_complete(SCSIRequest *req, int status)
1392 assert(req->status == -1);
1393 req->status = status;
1395 assert(req->sense_len <= sizeof(req->sense));
1396 if (status == GOOD) {
1397 req->sense_len = 0;
1400 if (req->sense_len) {
1401 memcpy(req->dev->sense, req->sense, req->sense_len);
1402 req->dev->sense_len = req->sense_len;
1403 req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
1404 } else {
1405 req->dev->sense_len = 0;
1406 req->dev->sense_is_ua = false;
1410 * Unit attention state is now stored in the device's sense buffer
1411 * if the HBA didn't do autosense. Clear the pending unit attention
1412 * flags.
1414 scsi_clear_unit_attention(req);
1416 scsi_req_ref(req);
1417 scsi_req_dequeue(req);
1418 req->bus->info->complete(req, req->status, req->resid);
1420 /* Cancelled requests might end up being completed instead of cancelled */
1421 notifier_list_notify(&req->cancel_notifiers, req);
1422 scsi_req_unref(req);
1425 /* Called by the devices when the request is canceled. */
1426 void scsi_req_cancel_complete(SCSIRequest *req)
1428 assert(req->io_canceled);
1429 if (req->bus->info->cancel) {
1430 req->bus->info->cancel(req);
1432 notifier_list_notify(&req->cancel_notifiers, req);
1433 scsi_req_unref(req);
1436 /* Cancel @req asynchronously. @notifier is added to @req's cancellation
1437 * notifier list; once the request's cancellation completes, the bus is
1438 * notified.
1439 */
1440 void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
1442 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1443 if (notifier) {
1444 notifier_list_add(&req->cancel_notifiers, notifier);
1446 if (req->io_canceled) {
1447 /* A blk_aio_cancel_async is pending; when it finishes,
1448 * scsi_req_cancel_complete will be called and will
1449 * call the notifier we just added. Just wait for that.
1451 assert(req->aiocb);
1452 return;
1454 /* Dropped in scsi_req_cancel_complete. */
1455 scsi_req_ref(req);
1456 scsi_req_dequeue(req);
1457 req->io_canceled = true;
1458 if (req->aiocb) {
1459 blk_aio_cancel_async(req->aiocb);
1460 } else {
1461 scsi_req_cancel_complete(req);
1465 void scsi_req_cancel(SCSIRequest *req)
1467 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1468 if (!req->enqueued) {
1469 return;
1471 assert(!req->io_canceled);
1472 /* Dropped in scsi_req_cancel_complete. */
1473 scsi_req_ref(req);
1474 scsi_req_dequeue(req);
1475 req->io_canceled = true;
1476 if (req->aiocb) {
1477 blk_aio_cancel(req->aiocb);
1478 } else {
1479 scsi_req_cancel_complete(req);
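/*
 * Rank unit attention conditions so that scsi_device_set_ua() only replaces
 * a pending condition with a more important one; smaller values win.
 */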
1483 static int scsi_ua_precedence(SCSISense sense)
1485 if (sense.key != UNIT_ATTENTION) {
1486 return INT_MAX;
1488 if (sense.asc == 0x29 && sense.ascq == 0x04) {
1489 /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
1490 return 1;
1491 } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
1492 /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
1493 return 2;
1494 } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
1495 /* These two go with "all others". */
1497 } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
1498 /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
1499 * POWER ON OCCURRED = 1
1500 * SCSI BUS RESET OCCURRED = 2
1501 * BUS DEVICE RESET FUNCTION OCCURRED = 3
1502 * I_T NEXUS LOSS OCCURRED = 7
1504 return sense.ascq;
1505 } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
1506 /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
1507 return 8;
1509 return (sense.asc << 8) | sense.ascq;
1512 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
1514 int prec1, prec2;
1515 if (sense.key != UNIT_ATTENTION) {
1516 return;
1518 trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
1519 sense.asc, sense.ascq);
1522 * Override a pre-existing unit attention condition, except for a more
1523 * important reset condition.
1525 prec1 = scsi_ua_precedence(sdev->unit_attention);
1526 prec2 = scsi_ua_precedence(sense);
1527 if (prec2 < prec1) {
1528 sdev->unit_attention = sense;
1532 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
1534 SCSIRequest *req;
1536 aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
1537 while (!QTAILQ_EMPTY(&sdev->requests)) {
1538 req = QTAILQ_FIRST(&sdev->requests);
1539 scsi_req_cancel_async(req, NULL);
1541 blk_drain(sdev->conf.blk);
1542 aio_context_release(blk_get_aio_context(sdev->conf.blk));
1543 scsi_device_set_ua(sdev, sense);
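/*
 * qdev path helpers: identify a device as <HBA path>/channel:id:lun (or just
 * channel:id:lun when the HBA has no path) and as a firmware device path.
 */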
1546 static char *scsibus_get_dev_path(DeviceState *dev)
1548 SCSIDevice *d = SCSI_DEVICE(dev);
1549 DeviceState *hba = dev->parent_bus->parent;
1550 char *id;
1551 char *path;
1553 id = qdev_get_dev_path(hba);
1554 if (id) {
1555 path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
1556 } else {
1557 path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
1559 g_free(id);
1560 return path;
1563 static char *scsibus_get_fw_dev_path(DeviceState *dev)
1565 SCSIDevice *d = SCSI_DEVICE(dev);
1566 return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
1567 qdev_fw_name(dev), d->id, d->lun);
1570 SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
1572 BusChild *kid;
1573 SCSIDevice *target_dev = NULL;
1575 QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, sibling) {
1576 DeviceState *qdev = kid->child;
1577 SCSIDevice *dev = SCSI_DEVICE(qdev);
1579 if (dev->channel == channel && dev->id == id) {
1580 if (dev->lun == lun) {
1581 return dev;
1583 target_dev = dev;
1586 return target_dev;
1589 /* SCSI request list. For simplicity, pv points to the whole device */
1591 static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
1592 const VMStateField *field, QJSON *vmdesc)
1594 SCSIDevice *s = pv;
1595 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1596 SCSIRequest *req;
1598 QTAILQ_FOREACH(req, &s->requests, next) {
1599 assert(!req->io_canceled);
1600 assert(req->status == -1);
1601 assert(req->enqueued);
1603 qemu_put_sbyte(f, req->retry ? 1 : 2);
1604 qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
1605 qemu_put_be32s(f, &req->tag);
1606 qemu_put_be32s(f, &req->lun);
1607 if (bus->info->save_request) {
1608 bus->info->save_request(f, req);
1610 if (req->ops->save_request) {
1611 req->ops->save_request(f, req);
1614 qemu_put_sbyte(f, 0);
1616 return 0;
1619 static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
1620 const VMStateField *field)
1622 SCSIDevice *s = pv;
1623 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1624 int8_t sbyte;
1626 while ((sbyte = qemu_get_sbyte(f)) > 0) {
1627 uint8_t buf[SCSI_CMD_BUF_SIZE];
1628 uint32_t tag;
1629 uint32_t lun;
1630 SCSIRequest *req;
1632 qemu_get_buffer(f, buf, sizeof(buf));
1633 qemu_get_be32s(f, &tag);
1634 qemu_get_be32s(f, &lun);
1635 req = scsi_req_new(s, tag, lun, buf, NULL);
1636 req->retry = (sbyte == 1);
1637 if (bus->info->load_request) {
1638 req->hba_private = bus->info->load_request(f, req);
1640 if (req->ops->load_request) {
1641 req->ops->load_request(f, req);
1644 /* Just restart it later. */
1645 scsi_req_enqueue_internal(req);
1647 /* At this point, the request will be kept alive by the reference
1648 * added by scsi_req_enqueue_internal, so we can release our reference.
1649 * The HBA of course will add its own reference in the load_request
1650 * callback if it needs to hold on to the SCSIRequest.
1652 scsi_req_unref(req);
1655 return 0;
1658 static const VMStateInfo vmstate_info_scsi_requests = {
1659 .name = "scsi-requests",
1660 .get = get_scsi_requests,
1661 .put = put_scsi_requests,
1664 static bool scsi_sense_state_needed(void *opaque)
1666 SCSIDevice *s = opaque;
1668 return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
1671 static const VMStateDescription vmstate_scsi_sense_state = {
1672 .name = "SCSIDevice/sense",
1673 .version_id = 1,
1674 .minimum_version_id = 1,
1675 .needed = scsi_sense_state_needed,
1676 .fields = (VMStateField[]) {
1677 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
1678 SCSI_SENSE_BUF_SIZE_OLD,
1679 SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
1680 VMSTATE_END_OF_LIST()
1684 const VMStateDescription vmstate_scsi_device = {
1685 .name = "SCSIDevice",
1686 .version_id = 1,
1687 .minimum_version_id = 1,
1688 .fields = (VMStateField[]) {
1689 VMSTATE_UINT8(unit_attention.key, SCSIDevice),
1690 VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
1691 VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
1692 VMSTATE_BOOL(sense_is_ua, SCSIDevice),
1693 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
1694 VMSTATE_UINT32(sense_len, SCSIDevice),
1696 .name = "requests",
1697 .version_id = 0,
1698 .field_exists = NULL,
1699 .size = 0, /* ouch */
1700 .info = &vmstate_info_scsi_requests,
1701 .flags = VMS_SINGLE,
1702 .offset = 0,
1704 VMSTATE_END_OF_LIST()
1706 .subsections = (const VMStateDescription*[]) {
1707 &vmstate_scsi_sense_state,
1708 NULL
1712 static void scsi_device_class_init(ObjectClass *klass, void *data)
1714 DeviceClass *k = DEVICE_CLASS(klass);
1715 set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
1716 k->bus_type = TYPE_SCSI_BUS;
1717 k->realize = scsi_qdev_realize;
1718 k->unrealize = scsi_qdev_unrealize;
1719 device_class_set_props(k, scsi_props);
1722 static void scsi_dev_instance_init(Object *obj)
1724 DeviceState *dev = DEVICE(obj);
1725 SCSIDevice *s = SCSI_DEVICE(dev);
1727 device_add_bootindex_property(obj, &s->conf.bootindex,
1728 "bootindex", NULL,
1729 &s->qdev);
1732 static const TypeInfo scsi_device_type_info = {
1733 .name = TYPE_SCSI_DEVICE,
1734 .parent = TYPE_DEVICE,
1735 .instance_size = sizeof(SCSIDevice),
1736 .abstract = true,
1737 .class_size = sizeof(SCSIDeviceClass),
1738 .class_init = scsi_device_class_init,
1739 .instance_init = scsi_dev_instance_init,
1742 static void scsi_register_types(void)
1744 type_register_static(&scsi_bus_info);
1745 type_register_static(&scsi_device_type_info);
1748 type_init(scsi_register_types)