/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 *
 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                when the allocation length of CDB is smaller
 *                                than 36.
 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "qemu/hw-version.h"
29 #include "qemu/memalign.h"
30 #include "hw/scsi/scsi.h"
31 #include "migration/qemu-file-types.h"
32 #include "migration/vmstate.h"
33 #include "hw/scsi/emulation.h"
34 #include "scsi/constants.h"
35 #include "sysemu/block-backend.h"
36 #include "sysemu/blockdev.h"
37 #include "hw/block/block.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/qdev-properties-system.h"
40 #include "sysemu/dma.h"
41 #include "sysemu/sysemu.h"
42 #include "qemu/cutils.h"
44 #include "qom/object.h"
#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    void            (*update_sense)(SCSIRequest *r);
};

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes.  */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed, media_event, eject_request;
    bool tray_open, tray_locked;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    uint32_t quirks;
    char *version, *serial, *vendor, *product, *device_id;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
};
static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}
/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}
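/*
 * (Re)initialize the bounce-buffer iovec used by emulated transfers:
 * allocate it on first use and cap its length to what is left of the
 * current transfer.
 */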
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
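/*
 * Save and restore the state of an in-flight request for migration;
 * scsi_disk_load_request() below is the counterpart of this function.
 */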
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);

    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
    } else if (!req->retry) {
        uint32_t len = r->iov.iov_len;
        qemu_put_be32s(f, &len);
        qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
    }
}
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);

    scsi_init_iovec(r, r->buflen);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
    } else if (!r->req.retry) {
        uint32_t len;
        qemu_get_be32s(f, &len);
        r->iov.iov_len = len;
        assert(r->iov.iov_len <= r->buflen);
        qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status.  */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}
static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, ret, acct_failed);
    }

    return false;
}
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}
static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    default:
        return false;
    }
}
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}
/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}
static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }
    case 0x83: /* Device identification page, mandatory */
    {
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of the CDB is too small,
           the additional length is not adjusted. */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}
static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}
static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe;  /* last session complete, disc finalized */
    outbuf[3] = 1;    /* first track on disc */
    outbuf[4] = 1;    /* # of sessions */
    outbuf[5] = 1;    /* first track of last session */
    outbuf[6] = 1;    /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size = 4;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous reporting is not supported */
        return -1;
    }

    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}
static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}
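/*
 * Fill in a single mode page at *p_outbuf.  page_control selects current
 * (0) or changeable (1) values.  Returns the number of bytes added, or a
 * negative value when the page is not valid for this device type.
 */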
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_VENDOR_SPECIFIC]        = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
        [MODE_PAGE_APPLE_VENDOR]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    assert(page < ARRAY_SIZE(mode_sense_valid));
    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            if (s->qdev.type == TYPE_ROM) {
                /* Automatic Write Reallocation Enabled */
                p[0] = 0x80;
            }
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0;    /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    case MODE_PAGE_APPLE_VENDOR:
        if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) {
            length = 0x1e;
            if (page_control == 1) { /* Changeable Values */
                break;
            }

            memset(p, 0, length);
            strcpy((char *)p + 8, "APPLE COMPUTER, INC   ");
            break;
        } else {
            return -1;
        }

    case MODE_PAGE_VENDOR_SPECIFIC:
        if (s->qdev.type == TYPE_DISK && (s->quirks &
            (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
            length = 0x2;
            if (page_control == 1) { /* Changeable Values */
                p[0] = 0xff;
                p[1] = 0xff;
                break;
            }
            p[0] = 0;
            p[1] = 0;
            break;
        } else {
            return -1;
        }

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}
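/*
 * Build the MODE SENSE (6/10) response: header, optional block
 * descriptor, and the requested mode page(s).
 */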
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (!blk_is_writable(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) {
            /* Use DBD from the request... */
            dev_specific_param = 0x00;

            /*
             * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
             * which should never return a block descriptor even though DBD is
             * not set, otherwise CDROM detection fails in MacOS
             */
            if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) &&
                page == MODE_PAGE_APPLE_VENDOR) {
                dbd = true;
            }
        } else {
            /*
             * MMC prescribes that CD/DVD drives have no block descriptors,
             * and defines no device-specific parameter.
             */
            dev_specific_param = 0x00;
            dbd = true;
        }
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == MODE_PAGE_ALLS) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}
static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}
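/*
 * Handle START STOP UNIT, which on removable devices also requests
 * loading or ejecting the medium.
 */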
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}
static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}
static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            if (!(s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED))) {
                goto invalid_param_len;
            }
            trace_scsi_disk_mode_select_page_truncated(page, page_len, len);
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}
static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len, bs;
    int pass;

    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        if (!(s->quirks &
            (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
            /* We only support PF=1, SP=0.  */
            goto invalid_field;
        }
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    /* Allow changing the block size */
    if (bd_len) {
        bs = p[5] << 16 | p[6] << 8 | p[7];

        /*
         * Since the existing code only checks/updates bits 8-15 of the block
         * size, restrict ourselves to the same requirement for now to ensure
         * that a block size set by a block descriptor and then read back by
         * a subsequent SCSI command will be the same
         */
        if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
            s->qdev.blocksize = bs;
            trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
        }
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
/* sector_num and nb_sectors expected to be in qdev blocksize */
static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}
typedef struct UnmapCBData {
    SCSIDiskReq *r;
    int count;
    uint8_t *inbuf;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);

    if (data->count > 0) {
        uint64_t sector_num = ldq_be_p(&data->inbuf[0]);
        uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);

        if (!check_lba_range(s, sector_num, nb_sectors)) {
            block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
                               BLOCK_ACCT_UNMAP);
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->sector_count * BDRV_SECTOR_SIZE,
                         BLOCK_ACCT_UNMAP);

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        r->sector * BDRV_SECTOR_SIZE,
                                        r->sector_count * BDRV_SECTOR_SIZE,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}
static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        scsi_req_unref(&r->req);
        g_free(data);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        scsi_unmap_complete_noio(data, ret);
    }
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
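/* Validate an UNMAP parameter list and kick off the chain of discards. */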
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
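/*
 * Emulate WRITE SAME.  An all-zero pattern is turned into a (possibly
 * unmapping) write-zeroes operation; any other pattern is expanded into
 * bounce-buffer writes of at most SCSI_WRITE_SAME_MAX bytes each.
 */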
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i, l;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += l) {
        l = MIN(s->qdev.blocksize, data->iov.iov_len - i);
        memcpy(&buf[i], inbuf, l);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}
static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}
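/*
 * Dispatch the emulated (non-data-path) commands.  Returns the expected
 * transfer length: positive when data is returned to the initiator,
 * negative when data is expected from it, and zero when the command has
 * no data phase.
 */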
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_scsi_disk_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_scsi_disk_emulate_command_SAI_unsupported();
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case UNMAP:
        trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        trace_scsi_disk_emulate_command_WRITE_SAME(
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
        break;
    case FORMAT_UNIT:
        trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer);
        break;
    default:
        trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
                                                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}
/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        trace_scsi_disk_dma_command_WRITE(
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}

static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
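    /*
     * Illustrative sequence: after an eject/insert cycle the guest first
     * sees the UNIT_ATTENTION_NO_MEDIUM sense set below; once that has been
     * reported, scsi_disk_unit_attention_reported() raises a further
     * MEDIUM_CHANGED unit attention because media_changed was set by the
     * load.
     */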
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb  = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open     = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb        = scsi_disk_resize_cb,
};

static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    bool read_only;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
        return;
    }

    if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
        !s->qdev.hba_supports_iothread)
    {
        error_setg(errp, "HBA does not support iothreads");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }

    read_only = !blk_supports_write_perm(s->qdev.conf.blk);
    if (dev->type == TYPE_ROM) {
        read_only = true;
    }

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }
    if (!s->device_id) {
        if (s->serial) {
            s->device_id = g_strdup_printf("%.20s", s->serial);
        } else {
            const char *str = blk_name(s->qdev.conf.blk);
            if (str && *str) {
                s->device_id = g_strdup(str);
            }
        }
    }

    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }

    blk_iostatus_enable(s->qdev.conf.blk);

    add_boot_device_lchs(&dev->qdev, NULL,
                         dev->conf.lcyls,
                         dev->conf.lheads,
                         dev->conf.lsecs);
}

static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}

static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx = NULL;
    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        ctx = blk_get_aio_context(s->qdev.conf.blk);
        aio_context_acquire(ctx);
        if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
            goto out;
        }
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
out:
    if (ctx) {
        aio_context_release(ctx);
    }
}

static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int ret;
    uint32_t blocksize = 2048;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    if (dev->conf.physical_block_size != 0) {
        blocksize = dev->conf.physical_block_size;
    }

    ctx = blk_get_aio_context(dev->conf.blk);
    aio_context_acquire(ctx);
    s->qdev.blocksize = blocksize;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
    aio_context_release(ctx);
}

static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

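/*
 * Per-opcode dispatch table: opcodes listed below are routed to the
 * emulation or DMA request ops declared above; any opcode left NULL falls
 * back to scsi_disk_emulate_reqops in scsi_new_request().
 */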
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,
    [FORMAT_UNIT]                   = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};

static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
{
    int i;
    int len = scsi_cdb_length(buf);
    char *line_buffer, *p;

    assert(len > 0 && len <= 16);
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", buf[i]);
    }
    trace_scsi_disk_new_request(lun, tag, line_buffer);

    g_free(line_buffer);
}

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

    if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
        scsi_disk_new_request_dump(lun, tag, buf);
    }

    return req;
}

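/*
 * Probe the passthrough device with a standard INQUIRY: byte 0 of the
 * response holds the peripheral device type and bit 7 of byte 1 is the
 * RMB (removable medium) bit, which is all get_device_type() needs.
 */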
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->qdev.io_timeout);
    if (ret < 0) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size, we'll fix it when the guest sends.
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB.  */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
    BlockCompletionFunc *cb;
    void *cb_opaque;
} SCSIBlockReq;

static void scsi_block_sgio_complete(void *opaque, int ret)
{
    SCSIBlockReq *req = (SCSIBlockReq *)opaque;
    SCSIDiskReq *r = &req->req;
    SCSIDevice *s = r->req.dev;
    sg_io_hdr_t *io_hdr = &req->io_header;

    if (ret == 0) {
        if (io_hdr->host_status != SCSI_HOST_OK) {
            scsi_req_complete_failed(&r->req, io_hdr->host_status);
            scsi_req_unref(&r->req);
            return;
        }

        if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
            ret = BUSY;
        } else {
            ret = io_hdr->status;
        }

        if (ret > 0) {
            aio_context_acquire(blk_get_aio_context(s->conf.blk));
            if (scsi_handle_rw_error(r, ret, true)) {
                aio_context_release(blk_get_aio_context(s->conf.blk));
                scsi_req_unref(&r->req);
                return;
            }
            aio_context_release(blk_get_aio_context(s->conf.blk));

            /* Ignore error.  */
            ret = 0;
        }
    }

    req->cb(req->cb_opaque, ret);
}

static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
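    /*
     * Illustrative example: a guest READ(16) for 16 blocks at LBA 0x100
     * that the DMA helpers split into two 8-block segments results in two
     * CDBs being built here, one for LBA 0x100 and one for LBA 0x108.
     * Since the guest used a 16-byte CDB (group code 4), the code below
     * keeps issuing 16-byte CDBs even though the LBAs would fit in 32 bits.
     */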
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = s->qdev.io_timeout * 1000;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;
    req->cb = cb;
    req->cb_opaque = opaque;
    trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
                                     nb_logical_blocks, io_header->timeout);
    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header,
                          scsi_block_sgio_complete, req);
    assert(aiocb != NULL);
    return aiocb;
}

static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}

static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
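    /*
     * The top three bits of the opcode form the CDB group code: group 0 is
     * a 6-byte CDB, groups 1 and 2 are 10-byte, group 4 is 16-byte and
     * group 5 is 12-byte.  That determines where the GROUP NUMBER field
     * sits in the original CDB: byte 6 of a 10-byte CDB, byte 10 of a
     * 12-byte CDB and byte 14 of a 16-byte CDB.
     */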
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
    case 2:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 5:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 4:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    return scsi_disk_dma_command(req, buf);
}

static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, size_t buf_len,
                                void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len);
    }
}

static void scsi_block_update_sense(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
    r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
}

static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                                   BlockCompletionFunc *cb, void *cb_opaque,
                                   void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};

#define DEFINE_SCSI_DISK_PROPERTIES()                                   \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk),  \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),             \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),            \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),                  \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),                \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),                \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),              \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)

static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

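/*
 * Illustrative use of the properties above (assumes a SCSI controller such
 * as virtio-scsi-pci is already present on the command line):
 *
 *   -blockdev driver=file,node-name=disk0,filename=disk.img
 *   -device scsi-hd,drive=disk0,serial=DEADBEEF,rotation_rate=1
 *
 * where rotation_rate=1 advertises a non-rotating (solid state) medium.
 */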
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_hd_realize;
    sc->unrealize    = scsi_unrealize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    device_class_set_props(dc, scsi_hd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0),
    DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_cd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    device_class_set_props(dc, scsi_cd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};

static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

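/*
 * Illustrative use (assumes a host SCSI block device that supports SG_IO
 * and a SCSI controller on the command line):
 *
 *   -blockdev driver=host_device,node-name=blk0,filename=/dev/sdb
 *   -device scsi-block,drive=blk0
 *
 * scsi_block_realize() rejects backends that do not implement SG_IO
 * version 3 or newer.
 */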
static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize      = scsi_block_realize;
    sc->alloc_req    = scsi_block_new_request;
    sc->parse_cdb    = scsi_block_parse_cdb;
    sdc->dma_readv   = scsi_block_dma_readv;
    sdc->dma_writev  = scsi_block_dma_writev;
    sdc->update_sense = scsi_block_update_sense;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    device_class_set_props(dc, scsi_block_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
    type_register_static(&scsi_block_info);
}

type_init(scsi_disk_register_types)