#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/hw-version.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);
static void scsi_clear_reported_luns_changed(SCSIRequest *req);

static int next_scsi_bus;
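/*
 * Walk the bus's child list under RCU and return the SCSIDevice matching
 * @channel/@id/@lun; if only channel/id match, the first such device is
 * returned instead (see the comments in the body, including the note about
 * racing against main-thread hot-plug).
 */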
static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
                                       int channel, int id, int lun,
                                       bool include_unrealized)
    SCSIDevice *retval = NULL;

    QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {

            /*
             * If we don't find an exact match (channel/id/lun),
             * we will return the first device which matches channel/id.
             */

    /*
     * This function might run on the IO thread and we might race against
     * main thread hot-plugging the device.
     * We assume that as soon as .realized is set to true we can let
     * the user access the device.
     */

    if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) {
SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
    RCU_READ_LOCK_GUARD();
    return do_scsi_device_find(bus, channel, id, lun, false);

SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, id, lun, false);

/*
 * Invoke @fn() for each enqueued request in device @s. Must be called from the
 * main loop thread while the guest is stopped. This is only suitable for
 * vmstate ->put(), use scsi_device_for_each_req_async() for other cases.
 */
static void scsi_device_for_each_req_sync(SCSIDevice *s,
                                          void (*fn)(SCSIRequest *, void *),
    SCSIRequest *next_req;

    assert(!runstate_is_running());
    assert(qemu_in_main_thread());

    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {

    void (*fn)(SCSIRequest *, void *);
} SCSIDeviceForEachReqAsyncData;

static void scsi_device_for_each_req_async_bh(void *opaque)
    g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
    SCSIDevice *s = data->s;

    /*
     * If the AioContext changed before this BH was called then reschedule into
     * the new AioContext before accessing ->requests. This can happen when
     * scsi_device_for_each_req_async() is called and then the AioContext is
     * changed before BHs are run.
     */
    ctx = blk_get_aio_context(s->conf.blk);
    if (ctx != qemu_get_current_aio_context()) {
        aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh,
                                g_steal_pointer(&data));

    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        data->fn(req, data->fn_opaque);

    /* Drop the reference taken by scsi_device_for_each_req_async() */
    object_unref(OBJECT(s));

/*
 * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
 * runs in the AioContext that is executing the request.
 */
static void scsi_device_for_each_req_async(SCSIDevice *s,
                                           void (*fn)(SCSIRequest *, void *),
    assert(qemu_in_main_thread());

    SCSIDeviceForEachReqAsyncData *data =
        g_new(SCSIDeviceForEachReqAsyncData, 1);

    data->fn_opaque = opaque;

    /*
     * Hold a reference to the SCSIDevice until
     * scsi_device_for_each_req_async_bh() finishes.
     */
    object_ref(OBJECT(s));

    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
                            scsi_device_for_each_req_async_bh,

static void scsi_device_realize(SCSIDevice *s, Error **errp)
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);

    sc->realize(s, errp);

static void scsi_device_unrealize(SCSIDevice *s)
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
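/*
 * CDB parsing happens in two steps: the generic scsi_req_parse_cdb() fills in
 * the common SCSICommand fields first, and a bus that provides its own
 * parse_cdb callback may then override the result.
 */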
int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len, void *hba_private)
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);

    return sc->alloc_req(s, tag, lun, buf, hba_private);

void scsi_device_unit_attention_reported(SCSIDevice *s)
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);

/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
                         const SCSIBusInfo *info, const char *bus_name)
    qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;

    qbus_set_bus_hotplug_handler(BUS(bus));
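/*
 * DMA restart after the VM resumes: a request that was in the middle of a
 * data transfer (SCSI_XFER_FROM_DEV/SCSI_XFER_TO_DEV) is simply continued,
 * while anything else is dequeued and re-enqueued so its command is issued
 * again from scratch.
 */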
void scsi_req_retry(SCSIRequest *req)

/* Called in the AioContext that is executing the request */
static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
    switch (req->cmd.mode) {
    case SCSI_XFER_FROM_DEV:
    case SCSI_XFER_TO_DEV:
        scsi_req_continue(req);
        scsi_req_dequeue(req);
        scsi_req_enqueue(req);

static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
    SCSIDevice *s = opaque;

    assert(qemu_in_main_thread());

    scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);

static bool scsi_bus_is_address_free(SCSIBus *bus,
                                     int channel, int target, int lun,
    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, target, lun, true);
    if (d && d->lun == lun) {

static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = SCSI_BUS(qbus);

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);

    if (dev->id != -1 && dev->lun != -1) {
        if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
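/*
 * Realize path: when no target id or lun was specified on the command line,
 * the do/while loops below probe scsi_bus_is_address_free() for the first
 * free target (or the first free lun on the given target); if nothing is
 * free, realization fails with "no free target" / "no free lun".
 */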
static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    Error *local_err = NULL;

        if (dev->lun == -1) {
            is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
        } while (!is_free && id < bus->info->max_target);
            error_setg(errp, "no free target");
    } else if (dev->lun == -1) {
            is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
        } while (!is_free && lun < bus->info->max_lun);
            error_setg(errp, "no free lun");

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
        error_propagate(errp, local_err);
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);

static void scsi_qdev_unrealize(DeviceState *qdev)
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    qemu_del_vm_change_state_handler(dev->vmsentry);

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    scsi_device_unrealize(dev);

    blockdev_mark_auto_del(dev->conf.blk);

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      BlockdevOnError rerror,
                                      BlockdevOnError werror,
                                      const char *serial, Error **errp)
    if (blk_is_sg(blk)) {
        driver = "scsi-generic";
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->media_cd) {

    dev = qdev_new(driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev));

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), "bootindex", bootindex,
    if (object_property_find(OBJECT(dev), "removable")) {
        qdev_prop_set_bit(dev, "removable", removable);
    if (serial && object_property_find(OBJECT(dev), "serial")) {
        qdev_prop_set_string(dev, "serial", serial);
    if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
        object_unparent(OBJECT(dev));
    if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) {
        object_unparent(OBJECT(dev));

    qdev_prop_set_enum(dev, "rerror", rerror);
    qdev_prop_set_enum(dev, "werror", werror);

    if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
        object_unparent(OBJECT(dev));

    return SCSI_DEVICE(dev);

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, false,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  BLOCKDEV_ON_ERROR_AUTO,
static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);

static const struct SCSIReqOps reqops_invalid_field = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command

/* SCSIReqOps implementation for unit attention conditions.  */

static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
    SCSISense *ua = NULL;

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->bus->unit_attention;

    /*
     * Fetch the unit attention sense immediately so that another
     * scsi_req_new does not use reqops_unit_attention.
     */
    scsi_req_build_sense(req, *ua);
    *ua = SENSE_CODE(NO_SENSE);

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
    scsi_req_complete(req, CHECK_CONDITION);

static const struct SCSIReqOps reqops_unit_attention = {
    .size         = sizeof(SCSIRequest),
    .init_req     = scsi_fetch_unit_attention_sense,
    .send_command = scsi_unit_attention
/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {

static void store_lun(uint8_t *outbuf, int lun)
        /* Simple logical unit addressing method */
    /* Flat space addressing method */
    outbuf[0] = 0x40 | (lun >> 8);
    outbuf[1] = (lun & 255);
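/*
 * Example (flat space addressing): LUN 300 == 0x12c is stored as
 * outbuf[0] = 0x40 | 0x01 = 0x41 and outbuf[1] = 0x2c; the remaining bytes of
 * the zero-initialised 8-byte LUN entry stay 0.
 */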
static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
    uint8_t tmp[8] = {0};

    if (r->req.cmd.xfer < 16) {
    if (r->req.cmd.buf[2] > 2) {

    /* reserve space for 63 LUNs */
    buf = g_byte_array_sized_new(512);

    channel = r->req.dev->channel;

    /* add size (will be updated later to correct value) */
    g_byte_array_append(buf, tmp, 8);

    g_byte_array_append(buf, tmp, 8);

    WITH_RCU_READ_LOCK_GUARD() {
        QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
            DeviceState *qdev = kid->child;
            SCSIDevice *dev = SCSI_DEVICE(qdev);

            if (dev->channel == channel && dev->id == id && dev->lun != 0 &&
                qdev_is_realized(&dev->qdev)) {
                store_lun(tmp, dev->lun);
                g_byte_array_append(buf, tmp, 8);

    r->buf = g_byte_array_free(buf, FALSE);
    r->len = MIN(len, r->req.cmd.xfer & ~7);

    /* store the LUN list length */
    stl_be_p(&r->buf[0], len - 8);

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    scsi_clear_reported_luns_changed(&r->req);
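/*
 * The REPORT LUNS payload built above is an 8-byte header, whose first four
 * bytes receive the LUN list length via stl_be_p(), followed by one 8-byte
 * entry per reported LUN; the returned length is capped at the allocation
 * length rounded down to a multiple of 8.
 */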
static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        case 0x00: /* Supported page codes, mandatory */
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */

        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {

    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());

static size_t scsi_sense_len(SCSIRequest *req)
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
        return SCSI_SENSE_LEN;
static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);

        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
    case TEST_UNIT_READY:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);

    scsi_req_complete(req, GOOD);

static void scsi_target_read_data(SCSIRequest *req)
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

        scsi_req_data(&r->req, n);
        scsi_req_complete(&r->req, GOOD);

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);

static void scsi_target_free_buf(SCSIRequest *req)
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

static const struct SCSIReqOps reqops_target_command = {
    .size         = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data    = scsi_target_read_data,
    .get_buf      = scsi_target_get_buf,
    .free_req     = scsi_target_free_buf,
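/*
 * reqops_target_command backs the commands that are emulated at target level
 * rather than by a logical unit: REPORT LUNS, INQUIRY and REQUEST SENSE for a
 * LUN that does not exist, plus TEST UNIT READY (see scsi_target_send_command
 * above); any other command on a bad LUN is failed with LUN NOT SUPPORTED.
 */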
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->hba_private = hba_private;
    req->host_status = -1;

    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);

    if (reqops->init_req) {
        reqops->init_req(req);

    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
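/*
 * scsi_req_alloc() pins both the SCSIDevice and the HBA (the bus's parent)
 * with object_ref(); the matching object_unref() calls are made when the last
 * reference to the request is dropped in scsi_req_unref().
 */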
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, size_t buf_len, void *hba_private)
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSICommand cmd = { .len = 0 };

        trace_scsi_req_parse_bad(d->id, lun, tag, 0);

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
        ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);

        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);

    req->residual = req->cmd.xfer;

        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        trace_scsi_report_luns(d->id, lun, tag);
        trace_scsi_request_sense(d->id, lun, tag);
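/*
 * Request dispatch in scsi_req_new(): a pending unit attention (unless the
 * command is one of those allowed to slip through) selects
 * reqops_unit_attention; a mismatched LUN, REPORT LUNS, or REQUEST SENSE with
 * stored sense data selects reqops_target_command; a CDB that fails to parse
 * gets reqops_invalid_opcode; a transfer length above INT32_MAX gets
 * reqops_invalid_field; everything else goes through the device class's
 * alloc_req().
 *
 * Minimal HBA-side usage sketch (hypothetical caller, not part of this file):
 *
 *     SCSIRequest *req = scsi_req_new(sdev, tag, lun, cdb, cdb_len, hba_req);
 *     if (scsi_req_enqueue(req)) {
 *         scsi_req_continue(req);   // non-zero return: a data phase follows
 *     }
 */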
uint8_t *scsi_req_get_buf(SCSIRequest *req)
    return req->ops->get_buf(req);

static void scsi_clear_reported_luns_changed(SCSIRequest *req)
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->bus->unit_attention;

    if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
        ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
        *ua = SENSE_CODE(NO_SENSE);

int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
    if (!req->sense_len) {

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);

static void scsi_req_enqueue_internal(SCSIRequest *req)
    assert(!req->enqueued);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);

int32_t scsi_req_enqueue(SCSIRequest *req)
    scsi_req_enqueue_internal(req);
    rc = req->ops->send_command(req, req->cmd.buf);

static void scsi_req_dequeue(SCSIRequest *req)
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
static int scsi_get_performance_length(int num_desc, int type, int data_type)
    /* MMC-6, paragraph 6.7.  */
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        /* Each descriptor is as in Table 296 - Exceptions.  */
        return 6 * num_desc + 8;
        return 8 * num_desc + 8;
        return 2048 * num_desc + 8;
        return 16 * num_desc + 8;
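/*
 * Worked example: with (data_type & 3) == 0 (nominal performance descriptors),
 * two descriptors yield 16 * 2 + 8 = 40 bytes including the 8-byte header.
 */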
static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;

        xfer_unit = dev->blocksize;

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
    int length = buf[2] & 0x3;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    case 3: /* USB-specific.  */

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    case 3: /* USB-specific.  */
        xfer |= (extend ? buf[3] << 8 : 0);
        xfer |= (extend ? buf[5] << 8 : 0);

static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
    cmd->xfer = scsi_cdb_xfer(buf);
    case TEST_UNIT_READY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case ALLOW_MEDIUM_REMOVAL:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_READ_AHEAD:
    case ALLOW_OVERWRITE:
        if ((buf[1] & 2) == 0) {
        } else if ((buf[1] & 4) != 0) {
        cmd->xfer *= dev->blocksize;
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
    case READ_CAPACITY_10:
    case READ_BLOCK_LIMITS:
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
            cmd->xfer = buf[9] | (buf[8] << 8);
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
        cmd->xfer *= dev->blocksize;
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
        cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);

static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
    /* stream commands */
    case RECOVER_BUFFERED_DATA:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
    case READ_REVERSE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        cmd->xfer = buf[13] | (buf[12] << 8);
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = buf[8] | (buf[7] << 8);
        cmd->xfer = buf[4] | (buf[3] << 8);
    /* generic commands */
        return scsi_req_xfer(cmd, dev, buf);

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case POSITION_TO_ELEMENT:
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
    /* generic commands */
        return scsi_req_xfer(cmd, dev, buf);

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
    /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
        cmd->mode = SCSI_XFER_NONE;

    switch (cmd->buf[0]) {
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
    case CHANGE_DEFINITION:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case REASSIGN_BLOCKS:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
     * non-scanner devices, so we only get here for SCAN and not for START_STOP.
     */
        cmd->mode = SCSI_XFER_TO_DEV;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        cmd->mode = SCSI_XFER_FROM_DEV;
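/*
 * scsi_cmd_xfer_mode() defaults to SCSI_XFER_FROM_DEV; a zero-length transfer
 * becomes SCSI_XFER_NONE, the write-style opcodes listed above are
 * SCSI_XFER_TO_DEV, and ATA PASS-THROUGH takes its direction from bit 3 of
 * CDB byte 2 (the T_DIR bit).
 */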
int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
    len = scsi_cdb_length(buf);
    if (len < 0 || len > buf_len) {

    switch (dev->type) {
        rc = scsi_req_stream_xfer(cmd, dev, buf);
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        rc = scsi_req_scanner_length(cmd, dev, buf);
        rc = scsi_req_xfer(cmd, dev, buf);

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
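/*
 * On success scsi_req_parse_cdb() leaves a fully populated SCSICommand:
 * cmd->len (the CDB length derived from the opcode group), cmd->xfer (the
 * byte count computed per device type above), cmd->mode (transfer direction)
 * and cmd->lba.
 */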
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);

SCSIRequest *scsi_req_ref(SCSIRequest *req)
    assert(req->refcount > 0);

void scsi_req_unref(SCSIRequest *req)
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        if (req->ops->free_req) {
            req->ops->free_req(req);
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
        req->ops->read_data(req);

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
        req->residual -= len;
        req->bus->info->transfer_data(req, len);

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        dma_buf_read(buf, len, &req->residual, req->sg,
                     MEMTXATTRS_UNSPECIFIED);
        dma_buf_write(buf, len, &req->residual, req->sg,
                      MEMTXATTRS_UNSPECIFIED);
    scsi_req_continue(req);

void scsi_req_print(SCSIRequest *req)
    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        fprintf(fp, " - Oops\n");

void scsi_req_complete_failed(SCSIRequest *req, int host_status)
    assert(req->status == -1 && req->host_status == -1);
    assert(req->ops != &reqops_unit_attention);

    if (!req->bus->info->fail) {
        status = scsi_sense_from_host_status(req->host_status, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(req, sense);
        scsi_req_complete(req, status);

    req->host_status = host_status;

    scsi_req_dequeue(req);
    req->bus->info->fail(req);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);

void scsi_req_complete(SCSIRequest *req, int status)
    assert(req->status == -1 && req->host_status == -1);
    req->status = status;
    req->host_status = SCSI_HOST_OK;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;

    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->residual);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);

/* Called by the devices when the request is canceled. */
void scsi_req_cancel_complete(SCSIRequest *req)
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
/* Cancel @req asynchronously. @notifier is added to @req's cancellation
 * notifier list; the bus will be notified when the request's cancellation is
 * completed. */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    notifier_list_add(&req->cancel_notifiers, notifier);

    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */

    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_dequeue(req);
    req->io_canceled = true;
        blk_aio_cancel_async(req->aiocb);
        scsi_req_cancel_complete(req);

void scsi_req_cancel(SCSIRequest *req)
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_dequeue(req);
    req->io_canceled = true;
        blk_aio_cancel(req->aiocb);
        scsi_req_cancel_complete(req);
static int scsi_ua_precedence(SCSISense sense)
    if (sense.key != UNIT_ATTENTION) {
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
    return (sense.asc << 8) | sense.ascq;
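/*
 * scsi_ua_precedence() returns a ranking where a lower value means a more
 * important unit attention: scsi_bus_set_ua() and scsi_device_set_ua() only
 * replace a pending condition when the new sense ranks strictly lower.
 * Generic conditions fall back to (asc << 8) | ascq.
 */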
void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
    if (sense.key != UNIT_ATTENTION) {

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(bus->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        bus->unit_attention = sense;

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
    if (sense.key != UNIT_ATTENTION) {
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
    scsi_req_cancel_async(req, NULL);

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
    scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
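/*
 * Purging a device cancels every queued request asynchronously, drains the
 * BlockBackend so those cancellations can complete, and finally records
 * @sense as a unit attention condition on the device.
 */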
void scsi_device_drained_begin(SCSIDevice *sdev)
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bus->drain_count < INT_MAX);

    /*
     * Multiple BlockBackends can be on a SCSIBus and each may begin/end
     * draining at any time. Keep a counter so HBAs only see begin/end once.
     */
    if (bus->drain_count++ == 0) {
        trace_scsi_bus_drained_begin(bus, sdev);
        if (bus->info->drained_begin) {
            bus->info->drained_begin(bus);

void scsi_device_drained_end(SCSIDevice *sdev)
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bus->drain_count > 0);

    if (bus->drain_count-- == 1) {
        trace_scsi_bus_drained_end(bus, sdev);
        if (bus->info->drained_end) {
            bus->info->drained_end(bus);

static char *scsibus_get_dev_path(DeviceState *dev)
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;

    id = qdev_get_dev_path(hba);
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);

static char *scsibus_get_fw_dev_path(DeviceState *dev)
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
/* SCSI request list.  For simplicity, pv points to the whole device */

static void put_scsi_req(SCSIRequest *req, void *opaque)
    QEMUFile *f = opaque;

    assert(!req->io_canceled);
    assert(req->status == -1 && req->host_status == -1);
    assert(req->enqueued);

    qemu_put_sbyte(f, req->retry ? 1 : 2);
    qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
    qemu_put_be32s(f, &req->tag);
    qemu_put_be32s(f, &req->lun);
    if (req->bus->info->save_request) {
        req->bus->info->save_request(f, req);
    if (req->ops->save_request) {
        req->ops->save_request(f, req);

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
    scsi_device_for_each_req_sync(s, put_scsi_req, f);
    qemu_put_sbyte(f, 0);

static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        /*
         * A too-short CDB would have been rejected by scsi_req_new, so just use
         * SCSI_CMD_BUF_SIZE as the CDB length.
         */
        req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        if (req->ops->load_request) {
            req->ops->load_request(f, req);

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
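/*
 * Migration stream layout for the request list (see put_scsi_req() and
 * get_scsi_requests() above): one sentinel byte per request (1 if the request
 * must be retried, 2 otherwise, 0 terminates the list), the fixed-size CDB
 * buffer, the 32-bit tag and lun, then any HBA state saved through
 * bus->info->save_request and device state saved through ops->save_request.
 */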
static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get  = get_scsi_requests,
    .put  = put_scsi_requests,

static bool scsi_sense_state_needed(void *opaque)
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_scsi_requests,
            .flags        = VMS_SINGLE,
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),

static void scsi_device_class_init(ObjectClass *klass, void *data)
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type  = TYPE_SCSI_BUS;
    k->realize   = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    device_class_set_props(k, scsi_props);

static void scsi_dev_instance_init(Object *obj)
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,

static void scsi_bus_class_init(ObjectClass *klass, void *data)
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    k->check_address = scsi_bus_check_address;
    hc->unplug = qdev_simple_device_unplug_cb;

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },

static void scsi_register_types(void)
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);

type_init(scsi_register_types)