hw/scsi/scsi-bus.c
1 #include "qemu/osdep.h"
2 #include "hw/hw.h"
3 #include "qapi/error.h"
4 #include "qemu/error-report.h"
5 #include "qemu/option.h"
6 #include "hw/scsi/scsi.h"
7 #include "scsi/constants.h"
8 #include "hw/qdev.h"
9 #include "sysemu/block-backend.h"
10 #include "sysemu/blockdev.h"
11 #include "trace.h"
12 #include "sysemu/dma.h"
13 #include "qemu/cutils.h"
15 static char *scsibus_get_dev_path(DeviceState *dev);
16 static char *scsibus_get_fw_dev_path(DeviceState *dev);
17 static void scsi_req_dequeue(SCSIRequest *req);
18 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
19 static void scsi_target_free_buf(SCSIRequest *req);
21 static Property scsi_props[] = {
22 DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
23 DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
24 DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
25 DEFINE_PROP_END_OF_LIST(),
28 static void scsi_bus_class_init(ObjectClass *klass, void *data)
30 BusClass *k = BUS_CLASS(klass);
31 HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
33 k->get_dev_path = scsibus_get_dev_path;
34 k->get_fw_dev_path = scsibus_get_fw_dev_path;
35 hc->unplug = qdev_simple_device_unplug_cb;
38 static const TypeInfo scsi_bus_info = {
39 .name = TYPE_SCSI_BUS,
40 .parent = TYPE_BUS,
41 .instance_size = sizeof(SCSIBus),
42 .class_init = scsi_bus_class_init,
43 .interfaces = (InterfaceInfo[]) {
44 { TYPE_HOTPLUG_HANDLER },
45 { }
48 static int next_scsi_bus;
50 static void scsi_device_realize(SCSIDevice *s, Error **errp)
52 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
53 if (sc->realize) {
54 sc->realize(s, errp);
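/*
 * Parse a CDB on behalf of an HBA: run the generic parser first, then let
 * the HBA's own parse_cdb callback (if it provides one) override the result.
 */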
58 int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
59 void *hba_private)
61 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
62 int rc;
64 assert(cmd->len == 0);
65 rc = scsi_req_parse_cdb(dev, cmd, buf);
66 if (bus->info->parse_cdb) {
67 rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
69 return rc;
72 static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
73 uint8_t *buf, void *hba_private)
75 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
76 if (sc->alloc_req) {
77 return sc->alloc_req(s, tag, lun, buf, hba_private);
80 return NULL;
83 void scsi_device_unit_attention_reported(SCSIDevice *s)
85 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
86 if (sc->unit_attention_reported) {
87 sc->unit_attention_reported(s);
91 /* Create a scsi bus, and attach devices to it. */
92 void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
93 const SCSIBusInfo *info, const char *bus_name)
95 qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
96 bus->busnr = next_scsi_bus++;
97 bus->info = info;
98 qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
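/*
 * For reference, an HBA typically embeds a SCSIBus in its own state and
 * creates it from its realize function, roughly like this (sketch only;
 * "MyHBAState", "MY_HBA" and "my_scsi_info" are illustrative names, not
 * definitions from this file):
 *
 *     MyHBAState *s = MY_HBA(dev);
 *     scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
 *                  &my_scsi_info, NULL);
 */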
101 static void scsi_dma_restart_bh(void *opaque)
103 SCSIDevice *s = opaque;
104 SCSIRequest *req, *next;
106 qemu_bh_delete(s->bh);
107 s->bh = NULL;
109 aio_context_acquire(blk_get_aio_context(s->conf.blk));
110 QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
111 scsi_req_ref(req);
112 if (req->retry) {
113 req->retry = false;
114 switch (req->cmd.mode) {
115 case SCSI_XFER_FROM_DEV:
116 case SCSI_XFER_TO_DEV:
117 scsi_req_continue(req);
118 break;
119 case SCSI_XFER_NONE:
120 scsi_req_dequeue(req);
121 scsi_req_enqueue(req);
122 break;
125 scsi_req_unref(req);
127 aio_context_release(blk_get_aio_context(s->conf.blk));
130 void scsi_req_retry(SCSIRequest *req)
132 /* No need to save a reference, because scsi_dma_restart_bh just
133 * looks at the request list. */
134 req->retry = true;
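/*
 * VM run state change handler: when the guest resumes, schedule a bottom
 * half in the device's AioContext that re-issues every request flagged by
 * scsi_req_retry() (e.g. after an I/O error stopped the VM).
 */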
137 static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
139 SCSIDevice *s = opaque;
141 if (!running) {
142 return;
144 if (!s->bh) {
145 AioContext *ctx = blk_get_aio_context(s->conf.blk);
146 s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
147 qemu_bh_schedule(s->bh);
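/*
 * Realize hook for all SCSI devices: validate channel/id/lun against the
 * bus limits and, if "scsi-id" or "lun" were left at -1, pick the first
 * free slot on the bus before calling the device class realize method.
 */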
151 static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
153 SCSIDevice *dev = SCSI_DEVICE(qdev);
154 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
155 SCSIDevice *d;
156 Error *local_err = NULL;
158 if (dev->channel > bus->info->max_channel) {
159 error_setg(errp, "bad scsi channel id: %d", dev->channel);
160 return;
162 if (dev->id != -1 && dev->id > bus->info->max_target) {
163 error_setg(errp, "bad scsi device id: %d", dev->id);
164 return;
166 if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
167 error_setg(errp, "bad scsi device lun: %d", dev->lun);
168 return;
171 if (dev->id == -1) {
172 int id = -1;
173 if (dev->lun == -1) {
174 dev->lun = 0;
176 do {
177 d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
178 } while (d && d->lun == dev->lun && id < bus->info->max_target);
179 if (d && d->lun == dev->lun) {
180 error_setg(errp, "no free target");
181 return;
183 dev->id = id;
184 } else if (dev->lun == -1) {
185 int lun = -1;
186 do {
187 d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
188 } while (d && d->lun == lun && lun < bus->info->max_lun);
189 if (d && d->lun == lun) {
190 error_setg(errp, "no free lun");
191 return;
193 dev->lun = lun;
194 } else {
195 d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
196 assert(d);
197 if (d->lun == dev->lun && dev != d) {
198 error_setg(errp, "lun already used by '%s'", d->qdev.id);
199 return;
203 QTAILQ_INIT(&dev->requests);
204 scsi_device_realize(dev, &local_err);
205 if (local_err) {
206 error_propagate(errp, local_err);
207 return;
209 dev->vmsentry = qemu_add_vm_change_state_handler(scsi_dma_restart_cb,
210 dev);
213 static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
215 SCSIDevice *dev = SCSI_DEVICE(qdev);
217 if (dev->vmsentry) {
218 qemu_del_vm_change_state_handler(dev->vmsentry);
221 scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
222 blockdev_mark_auto_del(dev->conf.blk);
225 /* handle legacy '-drive if=scsi,...' cmd line args */
226 SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
227 int unit, bool removable, int bootindex,
228 bool share_rw,
229 BlockdevOnError rerror,
230 BlockdevOnError werror,
231 const char *serial, Error **errp)
233 const char *driver;
234 char *name;
235 DeviceState *dev;
236 Error *err = NULL;
238 driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
239 dev = qdev_create(&bus->qbus, driver);
240 name = g_strdup_printf("legacy[%d]", unit);
241 object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
242 g_free(name);
244 qdev_prop_set_uint32(dev, "scsi-id", unit);
245 if (bootindex >= 0) {
246 object_property_set_int(OBJECT(dev), bootindex, "bootindex",
247 &error_abort);
249 if (object_property_find(OBJECT(dev), "removable", NULL)) {
250 qdev_prop_set_bit(dev, "removable", removable);
252 if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
253 qdev_prop_set_string(dev, "serial", serial);
255 qdev_prop_set_drive(dev, "drive", blk, &err);
256 if (err) {
257 error_propagate(errp, err);
258 object_unparent(OBJECT(dev));
259 return NULL;
261 object_property_set_bool(OBJECT(dev), share_rw, "share-rw", &err);
262 if (err != NULL) {
263 error_propagate(errp, err);
264 object_unparent(OBJECT(dev));
265 return NULL;
268 qdev_prop_set_enum(dev, "rerror", rerror);
269 qdev_prop_set_enum(dev, "werror", werror);
271 object_property_set_bool(OBJECT(dev), true, "realized", &err);
272 if (err != NULL) {
273 error_propagate(errp, err);
274 object_unparent(OBJECT(dev));
275 return NULL;
277 return SCSI_DEVICE(dev);
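/* Create a scsi-disk (or scsi-generic) device for every '-drive if=scsi'
 * entry that targets this bus, one unit per target. */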
280 void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
282 Location loc;
283 DriveInfo *dinfo;
284 int unit;
286 loc_push_none(&loc);
287 for (unit = 0; unit <= bus->info->max_target; unit++) {
288 dinfo = drive_get(IF_SCSI, bus->busnr, unit);
289 if (dinfo == NULL) {
290 continue;
292 qemu_opts_loc_restore(dinfo->opts);
293 scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
294 unit, false, -1, false,
295 BLOCKDEV_ON_ERROR_AUTO,
296 BLOCKDEV_ON_ERROR_AUTO,
297 NULL, &error_fatal);
299 loc_pop(&loc);
302 static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
304 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
305 scsi_req_complete(req, CHECK_CONDITION);
306 return 0;
309 static const struct SCSIReqOps reqops_invalid_field = {
310 .size = sizeof(SCSIRequest),
311 .send_command = scsi_invalid_field
314 /* SCSIReqOps implementation for invalid commands. */
316 static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
318 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
319 scsi_req_complete(req, CHECK_CONDITION);
320 return 0;
323 static const struct SCSIReqOps reqops_invalid_opcode = {
324 .size = sizeof(SCSIRequest),
325 .send_command = scsi_invalid_command
328 /* SCSIReqOps implementation for unit attention conditions. */
330 static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
332 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
333 scsi_req_build_sense(req, req->dev->unit_attention);
334 } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
335 scsi_req_build_sense(req, req->bus->unit_attention);
337 scsi_req_complete(req, CHECK_CONDITION);
338 return 0;
341 static const struct SCSIReqOps reqops_unit_attention = {
342 .size = sizeof(SCSIRequest),
343 .send_command = scsi_unit_attention
346 /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
347 an invalid LUN. */
349 typedef struct SCSITargetReq SCSITargetReq;
351 struct SCSITargetReq {
352 SCSIRequest req;
353 int len;
354 uint8_t *buf;
355 int buf_len;
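/* Encode a LUN for a REPORT LUNS response: peripheral device addressing for
 * LUNs below 256, flat space addressing (0x40 in the first byte) above that. */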
358 static void store_lun(uint8_t *outbuf, int lun)
360 if (lun < 256) {
361 outbuf[1] = lun;
362 return;
364 outbuf[1] = (lun & 255);
365 outbuf[0] = (lun >> 8) | 0x40;
368 static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
370 BusChild *kid;
371 int i, len, n;
372 int channel, id;
373 bool found_lun0;
375 if (r->req.cmd.xfer < 16) {
376 return false;
378 if (r->req.cmd.buf[2] > 2) {
379 return false;
381 channel = r->req.dev->channel;
382 id = r->req.dev->id;
383 found_lun0 = false;
384 n = 0;
385 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
386 DeviceState *qdev = kid->child;
387 SCSIDevice *dev = SCSI_DEVICE(qdev);
389 if (dev->channel == channel && dev->id == id) {
390 if (dev->lun == 0) {
391 found_lun0 = true;
393 n += 8;
396 if (!found_lun0) {
397 n += 8;
400 scsi_target_alloc_buf(&r->req, n + 8);
402 len = MIN(n + 8, r->req.cmd.xfer & ~7);
403 memset(r->buf, 0, len);
404 stl_be_p(&r->buf[0], n);
405 i = found_lun0 ? 8 : 16;
406 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
407 DeviceState *qdev = kid->child;
408 SCSIDevice *dev = SCSI_DEVICE(qdev);
410 if (dev->channel == channel && dev->id == id) {
411 store_lun(&r->buf[i], dev->lun);
412 i += 8;
415 assert(i == n + 8);
416 r->len = len;
417 return true;
420 static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
422 assert(r->req.dev->lun != r->req.lun);
424 scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
426 if (r->req.cmd.buf[1] & 0x2) {
427 /* Command support data - optional, not implemented */
428 return false;
431 if (r->req.cmd.buf[1] & 0x1) {
432 /* Vital product data */
433 uint8_t page_code = r->req.cmd.buf[2];
434 r->buf[r->len++] = page_code; /* this page */
435 r->buf[r->len++] = 0x00;
437 switch (page_code) {
438 case 0x00: /* Supported page codes, mandatory */
440 int pages;
441 pages = r->len++;
442 r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
443 r->buf[pages] = r->len - pages - 1; /* number of pages */
444 break;
446 default:
447 return false;
449 /* done with EVPD */
450 assert(r->len < r->buf_len);
451 r->len = MIN(r->req.cmd.xfer, r->len);
452 return true;
455 /* Standard INQUIRY data */
456 if (r->req.cmd.buf[2] != 0) {
457 return false;
460 /* PAGE CODE == 0 */
461 r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
462 memset(r->buf, 0, r->len);
463 if (r->req.lun != 0) {
464 r->buf[0] = TYPE_NO_LUN;
465 } else {
466 r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
467 r->buf[2] = 5; /* Version */
468 r->buf[3] = 2 | 0x10; /* HiSup, response data format */
469 r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
470 r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
471 memcpy(&r->buf[8], "QEMU ", 8);
472 memcpy(&r->buf[16], "QEMU TARGET ", 16);
473 pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
475 return true;
478 static size_t scsi_sense_len(SCSIRequest *req)
480 if (req->dev->type == TYPE_SCANNER)
481 return SCSI_SENSE_LEN_SCANNER;
482 else
483 return SCSI_SENSE_LEN;
486 static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
488 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
489 int fixed_sense = (req->cmd.buf[1] & 1) == 0;
491 if (req->lun != 0 &&
492 buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
493 scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
494 scsi_req_complete(req, CHECK_CONDITION);
495 return 0;
497 switch (buf[0]) {
498 case REPORT_LUNS:
499 if (!scsi_target_emulate_report_luns(r)) {
500 goto illegal_request;
502 break;
503 case INQUIRY:
504 if (!scsi_target_emulate_inquiry(r)) {
505 goto illegal_request;
507 break;
508 case REQUEST_SENSE:
509 scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
510 if (req->lun != 0) {
511 const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
513 r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
514 sense, fixed_sense);
515 } else {
516 r->len = scsi_device_get_sense(r->req.dev, r->buf,
517 MIN(req->cmd.xfer, r->buf_len),
518 fixed_sense);
520 if (r->req.dev->sense_is_ua) {
521 scsi_device_unit_attention_reported(req->dev);
522 r->req.dev->sense_len = 0;
523 r->req.dev->sense_is_ua = false;
525 break;
526 case TEST_UNIT_READY:
527 break;
528 default:
529 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
530 scsi_req_complete(req, CHECK_CONDITION);
531 return 0;
532 illegal_request:
533 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
534 scsi_req_complete(req, CHECK_CONDITION);
535 return 0;
538 if (!r->len) {
539 scsi_req_complete(req, GOOD);
541 return r->len;
544 static void scsi_target_read_data(SCSIRequest *req)
546 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
547 uint32_t n;
549 n = r->len;
550 if (n > 0) {
551 r->len = 0;
552 scsi_req_data(&r->req, n);
553 } else {
554 scsi_req_complete(&r->req, GOOD);
558 static uint8_t *scsi_target_get_buf(SCSIRequest *req)
560 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
562 return r->buf;
565 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
567 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
569 r->buf = g_malloc(len);
570 r->buf_len = len;
572 return r->buf;
575 static void scsi_target_free_buf(SCSIRequest *req)
577 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
579 g_free(r->buf);
582 static const struct SCSIReqOps reqops_target_command = {
583 .size = sizeof(SCSITargetReq),
584 .send_command = scsi_target_send_command,
585 .read_data = scsi_target_read_data,
586 .get_buf = scsi_target_get_buf,
587 .free_req = scsi_target_free_buf,
591 SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
592 uint32_t tag, uint32_t lun, void *hba_private)
594 SCSIRequest *req;
595 SCSIBus *bus = scsi_bus_from_device(d);
596 BusState *qbus = BUS(bus);
597 const int memset_off = offsetof(SCSIRequest, sense)
598 + sizeof(req->sense);
600 req = g_malloc(reqops->size);
601 memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
602 req->refcount = 1;
603 req->bus = bus;
604 req->dev = d;
605 req->tag = tag;
606 req->lun = lun;
607 req->hba_private = hba_private;
608 req->status = -1;
609 req->ops = reqops;
610 object_ref(OBJECT(d));
611 object_ref(OBJECT(qbus->parent));
612 notifier_list_init(&req->cancel_notifiers);
613 trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
614 return req;
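/*
 * Build a request for a freshly received CDB.  A pending unit attention
 * condition is reported first for most opcodes; REPORT LUNS, commands sent
 * to an unsupported LUN and REQUEST SENSE with pending sense data are
 * emulated by the bus itself (reqops_target_command); everything else goes
 * to the device's alloc_req hook, falling back to the invalid opcode/field
 * reqops when the CDB cannot be parsed or the transfer exceeds INT32_MAX.
 */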
617 SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
618 uint8_t *buf, void *hba_private)
620 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
621 const SCSIReqOps *ops;
622 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
623 SCSIRequest *req;
624 SCSICommand cmd = { .len = 0 };
625 int ret;
627 if ((d->unit_attention.key == UNIT_ATTENTION ||
628 bus->unit_attention.key == UNIT_ATTENTION) &&
629 (buf[0] != INQUIRY &&
630 buf[0] != REPORT_LUNS &&
631 buf[0] != GET_CONFIGURATION &&
632 buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
635 * If we already have a pending unit attention condition,
636 * report this one before triggering another one.
638 !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
639 ops = &reqops_unit_attention;
640 } else if (lun != d->lun ||
641 buf[0] == REPORT_LUNS ||
642 (buf[0] == REQUEST_SENSE && d->sense_len)) {
643 ops = &reqops_target_command;
644 } else {
645 ops = NULL;
648 if (ops != NULL || !sc->parse_cdb) {
649 ret = scsi_req_parse_cdb(d, &cmd, buf);
650 } else {
651 ret = sc->parse_cdb(d, &cmd, buf, hba_private);
654 if (ret != 0) {
655 trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
656 req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
657 } else {
658 assert(cmd.len != 0);
659 trace_scsi_req_parsed(d->id, lun, tag, buf[0],
660 cmd.mode, cmd.xfer);
661 if (cmd.lba != -1) {
662 trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
663 cmd.lba);
666 if (cmd.xfer > INT32_MAX) {
667 req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
668 } else if (ops) {
669 req = scsi_req_alloc(ops, d, tag, lun, hba_private);
670 } else {
671 req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
675 req->cmd = cmd;
676 req->resid = req->cmd.xfer;
678 switch (buf[0]) {
679 case INQUIRY:
680 trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
681 break;
682 case TEST_UNIT_READY:
683 trace_scsi_test_unit_ready(d->id, lun, tag);
684 break;
685 case REPORT_LUNS:
686 trace_scsi_report_luns(d->id, lun, tag);
687 break;
688 case REQUEST_SENSE:
689 trace_scsi_request_sense(d->id, lun, tag);
690 break;
691 default:
692 break;
695 return req;
698 uint8_t *scsi_req_get_buf(SCSIRequest *req)
700 return req->ops->get_buf(req);
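/*
 * Drop the pending unit attention condition on the device or bus once the
 * current request completes, except for commands that must not clear it
 * (INQUIRY, GET CONFIGURATION, GET EVENT STATUS NOTIFICATION, and REPORT
 * LUNS unless the condition is REPORTED LUNS DATA HAS CHANGED).
 */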
703 static void scsi_clear_unit_attention(SCSIRequest *req)
705 SCSISense *ua;
706 if (req->dev->unit_attention.key != UNIT_ATTENTION &&
707 req->bus->unit_attention.key != UNIT_ATTENTION) {
708 return;
712 * If an INQUIRY command enters the enabled command state,
713 * the device server shall [not] clear any unit attention condition;
714 * See also MMC-6, paragraphs 6.5 and 6.6.2.
716 if (req->cmd.buf[0] == INQUIRY ||
717 req->cmd.buf[0] == GET_CONFIGURATION ||
718 req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
719 return;
722 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
723 ua = &req->dev->unit_attention;
724 } else {
725 ua = &req->bus->unit_attention;
729 * If a REPORT LUNS command enters the enabled command state, [...]
730 * the device server shall clear any pending unit attention condition
731 * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
733 if (req->cmd.buf[0] == REPORT_LUNS &&
734 !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
735 ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
736 return;
739 *ua = SENSE_CODE(NO_SENSE);
742 int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
744 int ret;
746 assert(len >= 14);
747 if (!req->sense_len) {
748 return 0;
751 ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
754 * FIXME: clearing unit attention conditions upon autosense should be done
755 * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
756 * (SAM-5, 5.14).
758 * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
759 * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
760 * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
762 if (req->dev->sense_is_ua) {
763 scsi_device_unit_attention_reported(req->dev);
764 req->dev->sense_len = 0;
765 req->dev->sense_is_ua = false;
767 return ret;
770 int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
772 return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
775 void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
777 trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
778 sense.key, sense.asc, sense.ascq);
779 req->sense_len = scsi_build_sense(req->sense, sense);
782 static void scsi_req_enqueue_internal(SCSIRequest *req)
784 assert(!req->enqueued);
785 scsi_req_ref(req);
786 if (req->bus->info->get_sg_list) {
787 req->sg = req->bus->info->get_sg_list(req);
788 } else {
789 req->sg = NULL;
791 req->enqueued = true;
792 QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
795 int32_t scsi_req_enqueue(SCSIRequest *req)
797 int32_t rc;
799 assert(!req->retry);
800 scsi_req_enqueue_internal(req);
801 scsi_req_ref(req);
802 rc = req->ops->send_command(req, req->cmd.buf);
803 scsi_req_unref(req);
804 return rc;
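/*
 * Together with scsi_req_new and scsi_req_continue this gives the usual HBA
 * submission pattern (sketch; "tag", "lun", "cdb" and "hba_req" stand in for
 * HBA-specific values):
 *
 *     req = scsi_req_new(sdev, tag, lun, cdb, hba_req);
 *     if (scsi_req_enqueue(req)) {
 *         scsi_req_continue(req);
 *     }
 */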
807 static void scsi_req_dequeue(SCSIRequest *req)
809 trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
810 req->retry = false;
811 if (req->enqueued) {
812 QTAILQ_REMOVE(&req->dev->requests, req, next);
813 req->enqueued = false;
814 scsi_req_unref(req);
818 static int scsi_get_performance_length(int num_desc, int type, int data_type)
820 /* MMC-6, paragraph 6.7. */
821 switch (type) {
822 case 0:
823 if ((data_type & 3) == 0) {
824 /* Each descriptor is as in Table 295 - Nominal performance. */
825 return 16 * num_desc + 8;
826 } else {
827 /* Each descriptor is as in Table 296 - Exceptions. */
828 return 6 * num_desc + 8;
830 case 1:
831 case 4:
832 case 5:
833 return 8 * num_desc + 8;
834 case 2:
835 return 2048 * num_desc + 8;
836 case 3:
837 return 16 * num_desc + 8;
838 default:
839 return 8;
843 static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
845 int byte_block = (buf[2] >> 2) & 0x1;
846 int type = (buf[2] >> 4) & 0x1;
847 int xfer_unit;
849 if (byte_block) {
850 if (type) {
851 xfer_unit = dev->blocksize;
852 } else {
853 xfer_unit = 512;
855 } else {
856 xfer_unit = 1;
859 return xfer_unit;
862 static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
864 int length = buf[2] & 0x3;
865 int xfer;
866 int unit = ata_passthrough_xfer_unit(dev, buf);
868 switch (length) {
869 case 0:
870 case 3: /* USB-specific. */
871 default:
872 xfer = 0;
873 break;
874 case 1:
875 xfer = buf[3];
876 break;
877 case 2:
878 xfer = buf[4];
879 break;
882 return xfer * unit;
885 static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
887 int extend = buf[1] & 0x1;
888 int length = buf[2] & 0x3;
889 int xfer;
890 int unit = ata_passthrough_xfer_unit(dev, buf);
892 switch (length) {
893 case 0:
894 case 3: /* USB-specific. */
895 default:
896 xfer = 0;
897 break;
898 case 1:
899 xfer = buf[4];
900 xfer |= (extend ? buf[3] << 8 : 0);
901 break;
902 case 2:
903 xfer = buf[6];
904 xfer |= (extend ? buf[5] << 8 : 0);
905 break;
908 return xfer * unit;
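/* Default transfer-length decoding for block and MMC devices: opcodes that
 * move no data get cmd->xfer = 0, block-addressed transfers are scaled by
 * the device block size, everything else keeps the value from the CDB. */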
911 static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
913 cmd->xfer = scsi_cdb_xfer(buf);
914 switch (buf[0]) {
915 case TEST_UNIT_READY:
916 case REWIND:
917 case START_STOP:
918 case SET_CAPACITY:
919 case WRITE_FILEMARKS:
920 case WRITE_FILEMARKS_16:
921 case SPACE:
922 case RESERVE:
923 case RELEASE:
924 case ERASE:
925 case ALLOW_MEDIUM_REMOVAL:
926 case SEEK_10:
927 case SYNCHRONIZE_CACHE:
928 case SYNCHRONIZE_CACHE_16:
929 case LOCATE_16:
930 case LOCK_UNLOCK_CACHE:
931 case SET_CD_SPEED:
932 case SET_LIMITS:
933 case WRITE_LONG_10:
934 case UPDATE_BLOCK:
935 case RESERVE_TRACK:
936 case SET_READ_AHEAD:
937 case PRE_FETCH:
938 case PRE_FETCH_16:
939 case ALLOW_OVERWRITE:
940 cmd->xfer = 0;
941 break;
942 case VERIFY_10:
943 case VERIFY_12:
944 case VERIFY_16:
945 if ((buf[1] & 2) == 0) {
946 cmd->xfer = 0;
947 } else if ((buf[1] & 4) != 0) {
948 cmd->xfer = 1;
950 cmd->xfer *= dev->blocksize;
951 break;
952 case MODE_SENSE:
953 break;
954 case WRITE_SAME_10:
955 case WRITE_SAME_16:
956 cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
957 break;
958 case READ_CAPACITY_10:
959 cmd->xfer = 8;
960 break;
961 case READ_BLOCK_LIMITS:
962 cmd->xfer = 6;
963 break;
964 case SEND_VOLUME_TAG:
965 /* GPCMD_SET_STREAMING from multimedia commands. */
966 if (dev->type == TYPE_ROM) {
967 cmd->xfer = buf[10] | (buf[9] << 8);
968 } else {
969 cmd->xfer = buf[9] | (buf[8] << 8);
971 break;
972 case WRITE_6:
973 /* length 0 means 256 blocks */
974 if (cmd->xfer == 0) {
975 cmd->xfer = 256;
977 /* fall through */
978 case WRITE_10:
979 case WRITE_VERIFY_10:
980 case WRITE_12:
981 case WRITE_VERIFY_12:
982 case WRITE_16:
983 case WRITE_VERIFY_16:
984 cmd->xfer *= dev->blocksize;
985 break;
986 case READ_6:
987 case READ_REVERSE:
988 /* length 0 means 256 blocks */
989 if (cmd->xfer == 0) {
990 cmd->xfer = 256;
992 /* fall through */
993 case READ_10:
994 case READ_12:
995 case READ_16:
996 cmd->xfer *= dev->blocksize;
997 break;
998 case FORMAT_UNIT:
999 /* MMC mandates the parameter list to be 12 bytes long. Parameters
1000 * for block devices are restricted to the header right now. */
1001 if (dev->type == TYPE_ROM && (buf[1] & 16)) {
1002 cmd->xfer = 12;
1003 } else {
1004 cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
1006 break;
1007 case INQUIRY:
1008 case RECEIVE_DIAGNOSTIC:
1009 case SEND_DIAGNOSTIC:
1010 cmd->xfer = buf[4] | (buf[3] << 8);
1011 break;
1012 case READ_CD:
1013 case READ_BUFFER:
1014 case WRITE_BUFFER:
1015 case SEND_CUE_SHEET:
1016 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1017 break;
1018 case PERSISTENT_RESERVE_OUT:
1019 cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
1020 break;
1021 case ERASE_12:
1022 if (dev->type == TYPE_ROM) {
1023 /* MMC command GET PERFORMANCE. */
1024 cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
1025 buf[10], buf[1] & 0x1f);
1027 break;
1028 case MECHANISM_STATUS:
1029 case READ_DVD_STRUCTURE:
1030 case SEND_DVD_STRUCTURE:
1031 case MAINTENANCE_OUT:
1032 case MAINTENANCE_IN:
1033 if (dev->type == TYPE_ROM) {
1034 /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
1035 cmd->xfer = buf[9] | (buf[8] << 8);
1037 break;
1038 case ATA_PASSTHROUGH_12:
1039 if (dev->type == TYPE_ROM) {
1040 /* BLANK command of MMC */
1041 cmd->xfer = 0;
1042 } else {
1043 cmd->xfer = ata_passthrough_12_xfer(dev, buf);
1045 break;
1046 case ATA_PASSTHROUGH_16:
1047 cmd->xfer = ata_passthrough_16_xfer(dev, buf);
1048 break;
1050 return 0;
1053 static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1055 switch (buf[0]) {
1056 /* stream commands */
1057 case ERASE_12:
1058 case ERASE_16:
1059 cmd->xfer = 0;
1060 break;
1061 case READ_6:
1062 case READ_REVERSE:
1063 case RECOVER_BUFFERED_DATA:
1064 case WRITE_6:
1065 cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
1066 if (buf[1] & 0x01) { /* fixed */
1067 cmd->xfer *= dev->blocksize;
1069 break;
1070 case READ_16:
1071 case READ_REVERSE_16:
1072 case VERIFY_16:
1073 case WRITE_16:
1074 cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
1075 if (buf[1] & 0x01) { /* fixed */
1076 cmd->xfer *= dev->blocksize;
1078 break;
1079 case REWIND:
1080 case LOAD_UNLOAD:
1081 cmd->xfer = 0;
1082 break;
1083 case SPACE_16:
1084 cmd->xfer = buf[13] | (buf[12] << 8);
1085 break;
1086 case READ_POSITION:
1087 switch (buf[1] & 0x1f) /* operation code */ {
1088 case SHORT_FORM_BLOCK_ID:
1089 case SHORT_FORM_VENDOR_SPECIFIC:
1090 cmd->xfer = 20;
1091 break;
1092 case LONG_FORM:
1093 cmd->xfer = 32;
1094 break;
1095 case EXTENDED_FORM:
1096 cmd->xfer = buf[8] | (buf[7] << 8);
1097 break;
1098 default:
1099 return -1;
1102 break;
1103 case FORMAT_UNIT:
1104 cmd->xfer = buf[4] | (buf[3] << 8);
1105 break;
1106 /* generic commands */
1107 default:
1108 return scsi_req_xfer(cmd, dev, buf);
1110 return 0;
1113 static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1115 switch (buf[0]) {
1116 /* medium changer commands */
1117 case EXCHANGE_MEDIUM:
1118 case INITIALIZE_ELEMENT_STATUS:
1119 case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
1120 case MOVE_MEDIUM:
1121 case POSITION_TO_ELEMENT:
1122 cmd->xfer = 0;
1123 break;
1124 case READ_ELEMENT_STATUS:
1125 cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
1126 break;
1128 /* generic commands */
1129 default:
1130 return scsi_req_xfer(cmd, dev, buf);
1132 return 0;
1135 static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1137 switch (buf[0]) {
1138 /* Scanner commands */
1139 case OBJECT_POSITION:
1140 cmd->xfer = 0;
1141 break;
1142 case SCAN:
1143 cmd->xfer = buf[4];
1144 break;
1145 case READ_10:
1146 case SEND:
1147 case GET_WINDOW:
1148 case SET_WINDOW:
1149 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1150 break;
1151 default:
1152 /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
1153 return scsi_req_xfer(cmd, dev, buf);
1156 return 0;
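/* Classify the transfer direction: opcodes known to send data to the device
 * get SCSI_XFER_TO_DEV, anything else with a non-zero transfer length
 * defaults to SCSI_XFER_FROM_DEV. */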
1159 static void scsi_cmd_xfer_mode(SCSICommand *cmd)
1161 if (!cmd->xfer) {
1162 cmd->mode = SCSI_XFER_NONE;
1163 return;
1165 switch (cmd->buf[0]) {
1166 case WRITE_6:
1167 case WRITE_10:
1168 case WRITE_VERIFY_10:
1169 case WRITE_12:
1170 case WRITE_VERIFY_12:
1171 case WRITE_16:
1172 case WRITE_VERIFY_16:
1173 case VERIFY_10:
1174 case VERIFY_12:
1175 case VERIFY_16:
1176 case COPY:
1177 case COPY_VERIFY:
1178 case COMPARE:
1179 case CHANGE_DEFINITION:
1180 case LOG_SELECT:
1181 case MODE_SELECT:
1182 case MODE_SELECT_10:
1183 case SEND_DIAGNOSTIC:
1184 case WRITE_BUFFER:
1185 case FORMAT_UNIT:
1186 case REASSIGN_BLOCKS:
1187 case SEARCH_EQUAL:
1188 case SEARCH_HIGH:
1189 case SEARCH_LOW:
1190 case UPDATE_BLOCK:
1191 case WRITE_LONG_10:
1192 case WRITE_SAME_10:
1193 case WRITE_SAME_16:
1194 case UNMAP:
1195 case SEARCH_HIGH_12:
1196 case SEARCH_EQUAL_12:
1197 case SEARCH_LOW_12:
1198 case MEDIUM_SCAN:
1199 case SEND_VOLUME_TAG:
1200 case SEND_CUE_SHEET:
1201 case SEND_DVD_STRUCTURE:
1202 case PERSISTENT_RESERVE_OUT:
1203 case MAINTENANCE_OUT:
1204 case SET_WINDOW:
1205 case SCAN:
1206 /* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for
1207 * non-scanner devices, so we only get here for SCAN and not for START_STOP.
1209 cmd->mode = SCSI_XFER_TO_DEV;
1210 break;
1211 case ATA_PASSTHROUGH_12:
1212 case ATA_PASSTHROUGH_16:
1213 /* T_DIR */
1214 cmd->mode = (cmd->buf[2] & 0x8) ?
1215 SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
1216 break;
1217 default:
1218 cmd->mode = SCSI_XFER_FROM_DEV;
1219 break;
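/* Fill in cmd->len, cmd->xfer, cmd->mode and cmd->lba for @buf, using the
 * transfer-length rules that match the device type (tape, medium changer,
 * scanner, or the generic/block rules). */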
1223 int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
1225 int rc;
1226 int len;
1228 cmd->lba = -1;
1229 len = scsi_cdb_length(buf);
1230 if (len < 0) {
1231 return -1;
1234 cmd->len = len;
1235 switch (dev->type) {
1236 case TYPE_TAPE:
1237 rc = scsi_req_stream_xfer(cmd, dev, buf);
1238 break;
1239 case TYPE_MEDIUM_CHANGER:
1240 rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
1241 break;
1242 case TYPE_SCANNER:
1243 rc = scsi_req_scanner_length(cmd, dev, buf);
1244 break;
1245 default:
1246 rc = scsi_req_xfer(cmd, dev, buf);
1247 break;
1250 if (rc != 0)
1251 return rc;
1253 memcpy(cmd->buf, buf, cmd->len);
1254 scsi_cmd_xfer_mode(cmd);
1255 cmd->lba = scsi_cmd_lba(cmd);
1256 return 0;
1259 void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
1261 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
1263 scsi_device_set_ua(dev, sense);
1264 if (bus->info->change) {
1265 bus->info->change(bus, dev, sense);
1269 SCSIRequest *scsi_req_ref(SCSIRequest *req)
1271 assert(req->refcount > 0);
1272 req->refcount++;
1273 return req;
1276 void scsi_req_unref(SCSIRequest *req)
1278 assert(req->refcount > 0);
1279 if (--req->refcount == 0) {
1280 BusState *qbus = req->dev->qdev.parent_bus;
1281 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
1283 if (bus->info->free_request && req->hba_private) {
1284 bus->info->free_request(bus, req->hba_private);
1286 if (req->ops->free_req) {
1287 req->ops->free_req(req);
1289 object_unref(OBJECT(req->dev));
1290 object_unref(OBJECT(qbus->parent));
1291 g_free(req);
1295 /* Tell the device that we finished processing this chunk of I/O. It
1296 will start the next chunk or complete the command. */
1297 void scsi_req_continue(SCSIRequest *req)
1299 if (req->io_canceled) {
1300 trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
1301 return;
1303 trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
1304 if (req->cmd.mode == SCSI_XFER_TO_DEV) {
1305 req->ops->write_data(req);
1306 } else {
1307 req->ops->read_data(req);
1311 /* Called by the devices when data is ready for the HBA. The HBA should
1312 start a DMA operation to read or fill the device's data buffer.
1313 Once it completes, calling scsi_req_continue will restart I/O. */
1314 void scsi_req_data(SCSIRequest *req, int len)
1316 uint8_t *buf;
1317 if (req->io_canceled) {
1318 trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
1319 return;
1321 trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
1322 assert(req->cmd.mode != SCSI_XFER_NONE);
1323 if (!req->sg) {
1324 req->resid -= len;
1325 req->bus->info->transfer_data(req, len);
1326 return;
1329 /* If the device calls scsi_req_data and the HBA specified a
1330 * scatter/gather list, the transfer has to happen in a single
1331 * step. */
1332 assert(!req->dma_started);
1333 req->dma_started = true;
1335 buf = scsi_req_get_buf(req);
1336 if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
1337 req->resid = dma_buf_read(buf, len, req->sg);
1338 } else {
1339 req->resid = dma_buf_write(buf, len, req->sg);
1341 scsi_req_continue(req);
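/*
 * The resulting ping-pong between a device model and the HBA is roughly
 * (sketch, not a literal call chain):
 *
 *     device: produce/consume a chunk, call scsi_req_data(req, len)
 *     HBA:    transfer the chunk, then call scsi_req_continue(req)
 *     device: next chunk, or scsi_req_complete(req, GOOD) when done
 */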
1344 void scsi_req_print(SCSIRequest *req)
1346 FILE *fp = stderr;
1347 int i;
1349 fprintf(fp, "[%s id=%d] %s",
1350 req->dev->qdev.parent_bus->name,
1351 req->dev->id,
1352 scsi_command_name(req->cmd.buf[0]));
1353 for (i = 1; i < req->cmd.len; i++) {
1354 fprintf(fp, " 0x%02x", req->cmd.buf[i]);
1356 switch (req->cmd.mode) {
1357 case SCSI_XFER_NONE:
1358 fprintf(fp, " - none\n");
1359 break;
1360 case SCSI_XFER_FROM_DEV:
1361 fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
1362 break;
1363 case SCSI_XFER_TO_DEV:
1364 fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
1365 break;
1366 default:
1367 fprintf(fp, " - Oops\n");
1368 break;
1372 void scsi_req_complete(SCSIRequest *req, int status)
1374 assert(req->status == -1);
1375 req->status = status;
1377 assert(req->sense_len <= sizeof(req->sense));
1378 if (status == GOOD) {
1379 req->sense_len = 0;
1382 if (req->sense_len) {
1383 memcpy(req->dev->sense, req->sense, req->sense_len);
1384 req->dev->sense_len = req->sense_len;
1385 req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
1386 } else {
1387 req->dev->sense_len = 0;
1388 req->dev->sense_is_ua = false;
1392 * Unit attention state is now stored in the device's sense buffer
1393 * if the HBA didn't do autosense. Clear the pending unit attention
1394 * flags.
1396 scsi_clear_unit_attention(req);
1398 scsi_req_ref(req);
1399 scsi_req_dequeue(req);
1400 req->bus->info->complete(req, req->status, req->resid);
1402 /* Cancelled requests might end up being completed instead of cancelled */
1403 notifier_list_notify(&req->cancel_notifiers, req);
1404 scsi_req_unref(req);
1407 /* Called by the devices when the request is canceled. */
1408 void scsi_req_cancel_complete(SCSIRequest *req)
1410 assert(req->io_canceled);
1411 if (req->bus->info->cancel) {
1412 req->bus->info->cancel(req);
1414 notifier_list_notify(&req->cancel_notifiers, req);
1415 scsi_req_unref(req);
1418 /* Cancel @req asynchronously. @notifier is added to @req's cancellation
1419 * notifier list; the bus will be notified when the request's cancellation
1420 * has completed.
1421 */
1422 void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
1424 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1425 if (notifier) {
1426 notifier_list_add(&req->cancel_notifiers, notifier);
1428 if (req->io_canceled) {
1429 /* A blk_aio_cancel_async is pending; when it finishes,
1430 * scsi_req_cancel_complete will be called and will
1431 * call the notifier we just added. Just wait for that.
1433 assert(req->aiocb);
1434 return;
1436 /* Dropped in scsi_req_cancel_complete. */
1437 scsi_req_ref(req);
1438 scsi_req_dequeue(req);
1439 req->io_canceled = true;
1440 if (req->aiocb) {
1441 blk_aio_cancel_async(req->aiocb);
1442 } else {
1443 scsi_req_cancel_complete(req);
1447 void scsi_req_cancel(SCSIRequest *req)
1449 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1450 if (!req->enqueued) {
1451 return;
1453 assert(!req->io_canceled);
1454 /* Dropped in scsi_req_cancel_complete. */
1455 scsi_req_ref(req);
1456 scsi_req_dequeue(req);
1457 req->io_canceled = true;
1458 if (req->aiocb) {
1459 blk_aio_cancel(req->aiocb);
1460 } else {
1461 scsi_req_cancel_complete(req);
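/* Rank a unit attention condition; lower values win when deciding whether a
 * new condition may replace one that is already pending (scsi_device_set_ua). */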
1465 static int scsi_ua_precedence(SCSISense sense)
1467 if (sense.key != UNIT_ATTENTION) {
1468 return INT_MAX;
1470 if (sense.asc == 0x29 && sense.ascq == 0x04) {
1471 /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
1472 return 1;
1473 } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
1474 /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
1475 return 2;
1476 } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
1477 /* These two go with "all others". */
1479 } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
1480 /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
1481 * POWER ON OCCURRED = 1
1482 * SCSI BUS RESET OCCURRED = 2
1483 * BUS DEVICE RESET FUNCTION OCCURRED = 3
1484 * I_T NEXUS LOSS OCCURRED = 7
1486 return sense.ascq;
1487 } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
1488 /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
1489 return 8;
1491 return (sense.asc << 8) | sense.ascq;
1494 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
1496 int prec1, prec2;
1497 if (sense.key != UNIT_ATTENTION) {
1498 return;
1500 trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
1501 sense.asc, sense.ascq);
1504 * Override a pre-existing unit attention condition, except for a more
1505 * important reset condition.
1507 prec1 = scsi_ua_precedence(sdev->unit_attention);
1508 prec2 = scsi_ua_precedence(sense);
1509 if (prec2 < prec1) {
1510 sdev->unit_attention = sense;
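/* Cancel every request still queued on the device, drain outstanding I/O,
 * and record @sense as a pending unit attention (used e.g. on unrealize). */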
1514 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
1516 SCSIRequest *req;
1518 aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
1519 while (!QTAILQ_EMPTY(&sdev->requests)) {
1520 req = QTAILQ_FIRST(&sdev->requests);
1521 scsi_req_cancel_async(req, NULL);
1523 blk_drain(sdev->conf.blk);
1524 aio_context_release(blk_get_aio_context(sdev->conf.blk));
1525 scsi_device_set_ua(sdev, sense);
1528 static char *scsibus_get_dev_path(DeviceState *dev)
1530 SCSIDevice *d = SCSI_DEVICE(dev);
1531 DeviceState *hba = dev->parent_bus->parent;
1532 char *id;
1533 char *path;
1535 id = qdev_get_dev_path(hba);
1536 if (id) {
1537 path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
1538 } else {
1539 path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
1541 g_free(id);
1542 return path;
1545 static char *scsibus_get_fw_dev_path(DeviceState *dev)
1547 SCSIDevice *d = SCSI_DEVICE(dev);
1548 return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
1549 qdev_fw_name(dev), d->id, d->lun);
1552 SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
1554 BusChild *kid;
1555 SCSIDevice *target_dev = NULL;
1557 QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, sibling) {
1558 DeviceState *qdev = kid->child;
1559 SCSIDevice *dev = SCSI_DEVICE(qdev);
1561 if (dev->channel == channel && dev->id == id) {
1562 if (dev->lun == lun) {
1563 return dev;
1565 target_dev = dev;
1568 return target_dev;
1571 /* SCSI request list. For simplicity, pv points to the whole device */
1573 static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
1574 const VMStateField *field, QJSON *vmdesc)
1576 SCSIDevice *s = pv;
1577 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1578 SCSIRequest *req;
1580 QTAILQ_FOREACH(req, &s->requests, next) {
1581 assert(!req->io_canceled);
1582 assert(req->status == -1);
1583 assert(req->enqueued);
1585 qemu_put_sbyte(f, req->retry ? 1 : 2);
1586 qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
1587 qemu_put_be32s(f, &req->tag);
1588 qemu_put_be32s(f, &req->lun);
1589 if (bus->info->save_request) {
1590 bus->info->save_request(f, req);
1592 if (req->ops->save_request) {
1593 req->ops->save_request(f, req);
1596 qemu_put_sbyte(f, 0);
1598 return 0;
1601 static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
1602 const VMStateField *field)
1604 SCSIDevice *s = pv;
1605 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1606 int8_t sbyte;
1608 while ((sbyte = qemu_get_sbyte(f)) > 0) {
1609 uint8_t buf[SCSI_CMD_BUF_SIZE];
1610 uint32_t tag;
1611 uint32_t lun;
1612 SCSIRequest *req;
1614 qemu_get_buffer(f, buf, sizeof(buf));
1615 qemu_get_be32s(f, &tag);
1616 qemu_get_be32s(f, &lun);
1617 req = scsi_req_new(s, tag, lun, buf, NULL);
1618 req->retry = (sbyte == 1);
1619 if (bus->info->load_request) {
1620 req->hba_private = bus->info->load_request(f, req);
1622 if (req->ops->load_request) {
1623 req->ops->load_request(f, req);
1626 /* Just restart it later. */
1627 scsi_req_enqueue_internal(req);
1629 /* At this point, the request will be kept alive by the reference
1630 * added by scsi_req_enqueue_internal, so we can release our reference.
1631 * The HBA of course will add its own reference in the load_request
1632 * callback if it needs to hold on to the SCSIRequest.
1634 scsi_req_unref(req);
1637 return 0;
1640 static const VMStateInfo vmstate_info_scsi_requests = {
1641 .name = "scsi-requests",
1642 .get = get_scsi_requests,
1643 .put = put_scsi_requests,
1646 static bool scsi_sense_state_needed(void *opaque)
1648 SCSIDevice *s = opaque;
1650 return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
1653 static const VMStateDescription vmstate_scsi_sense_state = {
1654 .name = "SCSIDevice/sense",
1655 .version_id = 1,
1656 .minimum_version_id = 1,
1657 .needed = scsi_sense_state_needed,
1658 .fields = (VMStateField[]) {
1659 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
1660 SCSI_SENSE_BUF_SIZE_OLD,
1661 SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
1662 VMSTATE_END_OF_LIST()
1666 const VMStateDescription vmstate_scsi_device = {
1667 .name = "SCSIDevice",
1668 .version_id = 1,
1669 .minimum_version_id = 1,
1670 .fields = (VMStateField[]) {
1671 VMSTATE_UINT8(unit_attention.key, SCSIDevice),
1672 VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
1673 VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
1674 VMSTATE_BOOL(sense_is_ua, SCSIDevice),
1675 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
1676 VMSTATE_UINT32(sense_len, SCSIDevice),
1678 .name = "requests",
1679 .version_id = 0,
1680 .field_exists = NULL,
1681 .size = 0, /* ouch */
1682 .info = &vmstate_info_scsi_requests,
1683 .flags = VMS_SINGLE,
1684 .offset = 0,
1686 VMSTATE_END_OF_LIST()
1688 .subsections = (const VMStateDescription*[]) {
1689 &vmstate_scsi_sense_state,
1690 NULL
1694 static void scsi_device_class_init(ObjectClass *klass, void *data)
1696 DeviceClass *k = DEVICE_CLASS(klass);
1697 set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
1698 k->bus_type = TYPE_SCSI_BUS;
1699 k->realize = scsi_qdev_realize;
1700 k->unrealize = scsi_qdev_unrealize;
1701 k->props = scsi_props;
1704 static void scsi_dev_instance_init(Object *obj)
1706 DeviceState *dev = DEVICE(obj);
1707 SCSIDevice *s = SCSI_DEVICE(dev);
1709 device_add_bootindex_property(obj, &s->conf.bootindex,
1710 "bootindex", NULL,
1711 &s->qdev, NULL);
1714 static const TypeInfo scsi_device_type_info = {
1715 .name = TYPE_SCSI_DEVICE,
1716 .parent = TYPE_DEVICE,
1717 .instance_size = sizeof(SCSIDevice),
1718 .abstract = true,
1719 .class_size = sizeof(SCSIDeviceClass),
1720 .class_init = scsi_device_class_init,
1721 .instance_init = scsi_dev_instance_init,
1724 static void scsi_register_types(void)
1726 type_register_static(&scsi_bus_info);
1727 type_register_static(&scsi_device_type_info);
1730 type_init(scsi_register_types)