ar7: Remove conditional compilation
[qemu/ar7.git] hw/scsi/scsi-bus.c
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/qdev.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static int next_scsi_bus;

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}
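/* Parse a CDB on behalf of an HBA: run the common parser first, then let
 * the HBA's parse_cdb hook, if present, override the result.
 */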
int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}

/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
                  const SCSIBusInfo *info, const char *bus_name)
{
    qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
}
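/* Bottom half that re-issues requests flagged for retry while the VM was
 * stopped: in-flight data transfers are continued, command-only requests
 * are dequeued and submitted again.
 */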
static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDevice *s = opaque;
    SCSIRequest *req, *next;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        scsi_req_ref(req);
        if (req->retry) {
            req->retry = false;
            switch (req->cmd.mode) {
            case SCSI_XFER_FROM_DEV:
            case SCSI_XFER_TO_DEV:
                scsi_req_continue(req);
                break;
            case SCSI_XFER_NONE:
                scsi_req_dequeue(req);
                scsi_req_enqueue(req);
                break;
            }
        }
        scsi_req_unref(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

void scsi_req_retry(SCSIRequest *req)
{
    /* No need to save a reference, because scsi_dma_restart_bh just
     * looks at the request list.  */
    req->retry = true;
}

static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
{
    SCSIDevice *s = opaque;

    if (!running) {
        return;
    }
    if (!s->bh) {
        AioContext *ctx = blk_get_aio_context(s->conf.blk);
        s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}
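/* Realize a SCSI device: validate the channel/scsi-id/lun properties against
 * the bus limits, auto-assign a free target or LUN when they were left at -1,
 * and install the VM state change handler that restarts suspended requests.
 */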
static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    SCSIDevice *d;
    Error *local_err = NULL;

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return;
    }

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
        } while (d && d->lun == dev->lun && id < bus->info->max_target);
        if (d && d->lun == dev->lun) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
        } while (d && d->lun == lun && lun < bus->info->max_lun);
        if (d && d->lun == lun) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    } else {
        d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
        assert(d);
        if (d->lun == dev->lun && dev != d) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return;
        }
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qemu_add_vm_change_state_handler(scsi_dma_restart_cb,
                                                     dev);
}

static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
    blockdev_mark_auto_del(dev->conf.blk);
}

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    Error *err = NULL;

    driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
    dev = qdev_create(&bus->qbus, driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
    g_free(name);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), bootindex, "bootindex",
                                &error_abort);
    }
    if (object_property_find(OBJECT(dev), "removable", NULL)) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    qdev_prop_set_drive(dev, "drive", blk, &err);
    if (err) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    object_property_set_bool(OBJECT(dev), true, "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return SCSI_DEVICE(dev);
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus, bool deprecated)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        if (deprecated) {
            /* Handling -drive not claimed by machine initialization */
            if (blk_get_attached_dev(blk_by_legacy_dinfo(dinfo))) {
                continue;       /* claimed */
            }
            if (!dinfo->is_default) {
                warn_report("bus=%d,unit=%d is deprecated with this"
                            " machine type",
                            bus->busnr, unit);
            }
        }
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, NULL, &error_fatal);
    }
    loc_pop(&loc);
}
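/* Return true for HBA types whose bus is still expected to pick up legacy
 * '-drive if=scsi' definitions that were not claimed by the board code.
 */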
static bool is_scsi_hba_with_legacy_magic(Object *obj)
{
    static const char *magic[] = {
        "am53c974", "dc390", "esp", "lsi53c810", "lsi53c895a",
        "megasas", "megasas-gen2", "mptsas1068", "spapr-vscsi",
        "virtio-scsi-device",
        NULL
    };
    const char *typename = object_get_typename(obj);
    int i;

    for (i = 0; magic[i]; i++)
        if (!strcmp(typename, magic[i])) {
            return true;
        }

    return false;
}

static int scsi_legacy_handle_cmdline_cb(Object *obj, void *opaque)
{
    SCSIBus *bus = (SCSIBus *)object_dynamic_cast(obj, TYPE_SCSI_BUS);

    if (bus && is_scsi_hba_with_legacy_magic(OBJECT(bus->qbus.parent))) {
        scsi_bus_legacy_handle_cmdline(bus, true);
    }

    return 0;
}

void scsi_legacy_handle_cmdline(void)
{
    object_child_foreach_recursive(object_get_root(),
                                   scsi_legacy_handle_cmdline_cb, NULL);
}

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

/* SCSIReqOps implementation for unit attention conditions.  */

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->dev->unit_attention);
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->bus->unit_attention);
    }
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        outbuf[1] = lun;
        return;
    }
    outbuf[1] = (lun & 255);
    outbuf[0] = (lun >> 8) | 0x40;
}
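/* Build the REPORT LUNS parameter data by walking every device that shares
 * this request's channel and target id; LUN 0 is reported even when no
 * device is attached there.
 */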
static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int i, len, n;
    int channel, id;
    bool found_lun0;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }
    channel = r->req.dev->channel;
    id = r->req.dev->id;
    found_lun0 = false;
    n = 0;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == 0) {
                found_lun0 = true;
            }
            n += 8;
        }
    }
    if (!found_lun0) {
        n += 8;
    }

    scsi_target_alloc_buf(&r->req, n + 8);

    len = MIN(n + 8, r->req.cmd.xfer & ~7);
    memset(r->buf, 0, len);
    stl_be_p(&r->buf[0], n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            store_lun(&r->buf[i], dev->lun);
            i += 8;
        }
    }
    assert(i == n + 8);
    r->len = len;
    return true;
}
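/* Minimal INQUIRY emulation for a LUN that does not exist on this target:
 * only the mandatory VPD page 0x00 and the standard INQUIRY data are
 * provided.
 */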
static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}
static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}

static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            if (fixed_sense) {
                r->buf[0] = 0x70;
                r->buf[2] = sense.key;
                r->buf[10] = 10;
                r->buf[12] = sense.asc;
                r->buf[13] = sense.ascq;
                r->len = MIN(req->cmd.xfer, SCSI_SENSE_LEN);
            } else {
                r->buf[0] = 0x72;
                r->buf[1] = sense.key;
                r->buf[2] = sense.asc;
                r->buf[3] = sense.ascq;
                r->len = 8;
            }
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}

static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

static const struct SCSIReqOps reqops_target_command = {
    .size         = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data    = scsi_target_read_data,
    .get_buf      = scsi_target_get_buf,
    .free_req     = scsi_target_free_buf,
};
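/* Allocate a request with the given SCSIReqOps.  References are taken on the
 * device and on its HBA so that both stay alive while the request is in
 * flight.
 */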
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);
    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}
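/* Create a request for a freshly received CDB and pick the SCSIReqOps for it:
 * unit attention conditions and target-level commands (REPORT LUNS, requests
 * for a nonexistent LUN) are emulated by the common code, everything else is
 * handed to the device's own alloc_req.
 */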
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->resid = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}
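/* Clear a pending unit attention condition when a command that is allowed to
 * do so completes; see the SPC/MMC citations below for the exceptions.
 */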
static void scsi_clear_unit_attention(SCSIRequest *req)
{
    SCSISense *ua;
    if (req->dev->unit_attention.key != UNIT_ATTENTION &&
        req->bus->unit_attention.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * If an INQUIRY command enters the enabled command state,
     * the device server shall [not] clear any unit attention condition;
     * See also MMC-6, paragraphs 6.5 and 6.6.2.
     */
    if (req->cmd.buf[0] == INQUIRY ||
        req->cmd.buf[0] == GET_CONFIGURATION ||
        req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
        return;
    }

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else {
        ua = &req->bus->unit_attention;
    }

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    if (req->cmd.buf[0] == REPORT_LUNS &&
        !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
        return;
    }

    *ua = SENSE_CODE(NO_SENSE);
}

int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}
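/* Return the allocation length implied by an MMC GET PERFORMANCE command for
 * the given descriptor type and descriptor count.
 */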
static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}

static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}
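/* Compute the expected transfer length from the CDB for block and generic
 * devices; the device-type specific helpers below fall back to this one.
 */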
static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}

static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}
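/* Derive the transfer direction from the opcode once the transfer length is
 * known; zero-length commands are marked SCSI_XFER_NONE.
 */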
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0) {
        return rc;
    }

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}
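/* Record a unit attention condition on the device and forward the event to
 * the HBA's change callback, if it has one.
 */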
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->resid -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        req->resid = dma_buf_read(buf, len, req->sg);
    } else {
        req->resid = dma_buf_write(buf, len, req->sg);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1);
    req->status = status;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /*
     * Unit attention state is now stored in the device's sense buffer
     * if the HBA didn't do autosense.  Clear the pending unit attention
     * flags.
     */
    scsi_clear_unit_attention(req);

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->status, req->resid);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled. */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously.  @notifier is added to @req's cancellation
 * notifier list; the bus will be notified when the request's cancellation
 * is completed.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}
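/* Rank unit attention conditions; lower values take precedence when deciding
 * whether a new condition may replace a pending one.
 */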
static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    SCSIRequest *req;

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    while (!QTAILQ_EMPTY(&sdev->requests)) {
        req = QTAILQ_FIRST(&sdev->requests);
        scsi_req_cancel_async(req, NULL);
    }
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    BusChild *kid;
    SCSIDevice *target_dev = NULL;

    QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, ChildrenHead, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                return dev;
            }
            target_dev = dev;
        }
    }
    return target_dev;
}

/* SCSI request list.  For simplicity, pv points to the whole device */

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             VMStateField *field, QJSON *vmdesc)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    SCSIRequest *req;

    QTAILQ_FOREACH(req, &s->requests, next) {
        assert(!req->io_canceled);
        assert(req->status == -1);
        assert(req->enqueued);

        qemu_put_sbyte(f, req->retry ? 1 : 2);
        qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
        qemu_put_be32s(f, &req->tag);
        qemu_put_be32s(f, &req->lun);
        if (bus->info->save_request) {
            bus->info->save_request(f, req);
        }
        if (req->ops->save_request) {
            req->ops->save_request(f, req);
        }
    }
    qemu_put_sbyte(f, 0);

    return 0;
}

static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        req = scsi_req_new(s, tag, lun, buf, NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get  = get_scsi_requests,
    .put  = put_scsi_requests,
};

static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name         = "requests",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_scsi_requests,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type  = TYPE_SCSI_BUS;
    k->realize   = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    k->props     = scsi_props;
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev, NULL);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)