hw/arm/allwinner-r40: add Clock Control Unit
[qemu/ar7.git] / hw / scsi / scsi-disk.c
blobe0d79c7966cfa460a5b7cc901a494f708ba37514
1 /*
2 * SCSI Device emulation
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
15 * This code is licensed under the LGPL.
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "qemu/hw-version.h"
29 #include "qemu/memalign.h"
30 #include "hw/scsi/scsi.h"
31 #include "migration/qemu-file-types.h"
32 #include "migration/vmstate.h"
33 #include "hw/scsi/emulation.h"
34 #include "scsi/constants.h"
35 #include "sysemu/block-backend.h"
36 #include "sysemu/blockdev.h"
37 #include "hw/block/block.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/qdev-properties-system.h"
40 #include "sysemu/dma.h"
41 #include "sysemu/sysemu.h"
42 #include "qemu/cutils.h"
43 #include "trace.h"
44 #include "qom/object.h"
/*
 * The SG_IO ioctl interface is Linux-only.  Use the standard-conforming
 * predefined macro __linux__; the bare "__linux" spelling is a legacy
 * alias that is NOT defined when compiling in strict ISO C modes
 * (e.g. -std=c11), which would silently drop passthrough support.
 */
#ifdef __linux__
#include <scsi/sg.h>
#endif
50 #define SCSI_WRITE_SAME_MAX (512 * KiB)
51 #define SCSI_DMA_BUF_SIZE (128 * KiB)
52 #define SCSI_MAX_INQUIRY_LEN 256
53 #define SCSI_MAX_MODE_LEN 256
55 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
56 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
57 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
59 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
61 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
/* Class-level hooks shared by the scsi-disk family of devices. */
struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    /* DMA helpers used by scsi_do_read()/scsi_write_data(); subclasses
     * can substitute their own I/O path here (NOTE(review): presumably
     * overridden by the passthrough variant — confirm elsewhere in file). */
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    /* Decide whether a command's FUA bit must be emulated with a flush. */
    bool (*need_fua_emulation)(SCSICommand *cmd);
    /* Refresh the request's sense buffer; called on CHECK CONDITION in
     * scsi_handle_rw_error() when the request already carries sense data. */
    void (*update_sense)(SCSIRequest *r);
};
/* Per-request state layered on top of the generic SCSIRequest. */
typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;
    uint32_t sector_count;
    /* Allocated size in bytes of the bounce buffer in iov. */
    uint32_t buflen;
    /* Set once the data-transfer phase has begun (see scsi_read_data /
     * scsi_write_data); used to trigger the one-time FUA pre-flush. */
    bool started;
    /* True when FUA semantics must be provided by an explicit flush. */
    bool need_fua_emulation;
    /* Single-segment bounce buffer, wrapped by qiov for block-layer calls. */
    struct iovec iov;
    QEMUIOVector qiov;
    /* Cookie for block-layer I/O accounting (block_acct_*). */
    BlockAcctCookie acct;
} SCSIDiskReq;
84 #define SCSI_DISK_F_REMOVABLE 0
85 #define SCSI_DISK_F_DPOFUA 1
86 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
/* Device state for scsi-hd / scsi-cd emulation. */
struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;       /* bitmask of SCSI_DISK_F_* flags */
    bool media_changed;
    bool media_event;        /* pending MEC_NEW_MEDIA notification */
    bool eject_request;      /* pending MEC_EJECT_REQUESTED notification */
    uint16_t port_index;     /* relative target port for VPD page 0x83 */
    uint64_t max_unmap_size; /* reported in VPD block limits (0xb0) */
    uint64_t max_io_size;    /* reported in VPD block limits (0xb0) */
    uint32_t quirks;
    QEMUBH *bh;
    char *version;           /* INQUIRY product revision (4 bytes used) */
    char *serial;            /* VPD page 0x80 unit serial number */
    char *vendor;            /* INQUIRY vendor id (8 bytes, space padded) */
    char *product;           /* INQUIRY product id (16 bytes, space padded) */
    char *device_id;         /* VPD page 0x83 ASCII designator */
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000 - rotation rate not reported
     * 0x0001 - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xffe - rotations per minute
     * 0xffff - reserved
     */
    uint16_t rotation_rate;
};
116 static void scsi_free_request(SCSIRequest *req)
118 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
120 qemu_vfree(r->iov.iov_base);
123 /* Helper function for command completion with sense. */
124 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
126 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
127 sense.ascq);
128 scsi_req_build_sense(&r->req, sense);
129 scsi_req_complete(&r->req, CHECK_CONDITION);
132 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
134 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
136 if (!r->iov.iov_base) {
137 r->buflen = size;
138 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
140 r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
141 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
/*
 * SCSIReqOps::save_request hook: serialize per-request state for
 * migration.  Field order and widths form the wire format and must
 * match scsi_disk_load_request() exactly.
 */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            /* Write direction: the buffered payload is saved as-is. */
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            /* Read direction, not being retried: save the buffered data
             * together with its length (iov_len may be < buflen). */
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}
/*
 * SCSIReqOps::load_request hook: exact counterpart of
 * scsi_disk_save_request(); reads fields in the same order.
 */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        /* Re-create the bounce buffer at its saved size. */
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            /* Guard against a corrupted/hostile migration stream. */
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        /* Local I/O failure: derive SCSI status + sense from the errno. */
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status.  */
        status = ret;
        if (status == CHECK_CONDITION) {
            /* The device already produced sense data; map it back to an
             * errno so the rerror=/werror= policy can classify it. */
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            /* Keep the device-provided sense; let the class hook patch it. */
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        /* Request is parked and replayed when the VM resumes. */
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}
257 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
259 if (r->req.io_canceled) {
260 scsi_req_cancel_complete(&r->req);
261 return true;
264 if (ret < 0) {
265 return scsi_handle_rw_error(r, ret, acct_failed);
268 return false;
/*
 * Generic AIO completion callback for commands that need nothing beyond
 * accounting on success (e.g. flushes): finishes the request with GOOD
 * status, or routes failures through the error policy.
 */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    /* Runs in the BlockBackend's context; take its lock before touching
     * request state. */
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    /* Drop the reference taken when the AIO was submitted. */
    scsi_req_unref(&r->req);
}
293 static bool scsi_is_cmd_fua(SCSICommand *cmd)
295 switch (cmd->buf[0]) {
296 case READ_10:
297 case READ_12:
298 case READ_16:
299 case WRITE_10:
300 case WRITE_12:
301 case WRITE_16:
302 return (cmd->buf[1] & 8) != 0;
304 case VERIFY_10:
305 case VERIFY_12:
306 case VERIFY_16:
307 case WRITE_VERIFY_10:
308 case WRITE_VERIFY_12:
309 case WRITE_VERIFY_16:
310 return true;
312 case READ_6:
313 case WRITE_6:
314 default:
315 return false;
/*
 * Finish a write request, emitting the FUA-emulation flush first when
 * needed.  In the flush case the request completes from the flush's
 * AIO callback (scsi_aio_complete), which also drops our reference;
 * otherwise we complete and unref here.
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}
/*
 * Tail of a completed scatter/gather DMA transfer (aiocb already
 * cleared).  A successful DMA moves the whole remaining range in one
 * step, so the sector cursor jumps to the end.
 */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Writes may still owe a FUA flush; that path completes and
         * unrefs the request itself. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}
/* Called with AioContext lock held */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    /* Record accounting before the error policy decides what to do;
     * note failures are NOT re-accounted in scsi_dma_complete_noio
     * (it passes acct_failed=false). */
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
}
/*
 * Tail of a completed bounce-buffer read chunk: advance the cursor past
 * the sectors just read and hand the data to the HBA via scsi_req_data()
 * (which eventually re-enters scsi_read_data for the next chunk).
 */
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}
/* AIO callback for a bounce-buffer read chunk; accounts the I/O and
 * defers the rest to scsi_read_complete_noio. */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    /* Completion runs in the BlockBackend's context. */
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Actually issue a read to the block device. */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    /* @ret carries the status of a preceding FUA flush, if any. */
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        /* The HBA supplied a scatter/gather list: DMA straight into
         * guest memory, completing the whole transfer in one go. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        /* No S/G list: bounce through a buffer, one chunk at a time. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}
/* AIO callback after the pre-read flush used for FUA emulation:
 * account the flush, then proceed with the actual read. */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* A write-direction command must never reach the read path. */
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        /* Emulated FUA read: flush once before the first chunk so the
         * data read reflects stable storage. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
/*
 * Tail of a completed write chunk: advance the cursor, then either
 * finish the request (possibly via a FUA flush) or ask the HBA for the
 * next chunk of data.
 */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        /* Transfer finished; scsi_write_do_fua completes and unrefs. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        /* Request the next chunk from the HBA. */
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}
/* AIO callback for a bounce-buffer write chunk; accounts the I/O and
 * defers the rest to scsi_write_complete_noio. */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/*
 * SCSIReqOps::write_data hook: called whenever the HBA has (more) data
 * ready for a write-direction command.  Handles the VERIFY family by
 * skipping the actual media write.
 */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        /* VERIFY with data: consume the bytes without writing them. */
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        /* Scatter/gather list available: DMA the whole transfer at once. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        /* Bounce-buffered path: write the chunk currently in qiov. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}
604 /* Return a pointer to the data buffer. */
605 static uint8_t *scsi_get_buf(SCSIRequest *req)
607 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
609 return (uint8_t *)r->iov.iov_base;
/*
 * Build an INQUIRY EVPD (Vital Product Data) page into @outbuf.
 * Returns the total page length in bytes, or -1 if the requested page
 * is not supported for this device type.
 */
static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    /* Common 4-byte header; the page-length byte is patched at the end. */
    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36; /* cap the serial number at 36 characters */
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }
    case 0x83: /* Device identification page, mandatory */
    {
        /* Designator header is 4 bytes; keep total descriptor <= 255. */
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            /* Port index occupies the last 2 of the 4 identifier bytes. */
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            /* Further clamp to the backend's own transfer limit. */
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}
/*
 * Build the response to an INQUIRY command into @outbuf.  Returns the
 * response length, or -1 for an invalid CDB (which the caller turns
 * into CHECK CONDITION).
 */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        /* PAGE CODE must be zero when EVPD is not set. */
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    /* Vendor (8 bytes) and product (16 bytes), space padded. */
    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    /* Product revision level: first 4 characters of s->version. */
    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}
818 static inline bool media_is_dvd(SCSIDiskState *s)
820 uint64_t nb_sectors;
821 if (s->qdev.type != TYPE_ROM) {
822 return false;
824 if (!blk_is_available(s->qdev.conf.blk)) {
825 return false;
827 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
828 return nb_sectors > CD_MAX_SECTORS;
831 static inline bool media_is_cd(SCSIDiskState *s)
833 uint64_t nb_sectors;
834 if (s->qdev.type != TYPE_ROM) {
835 return false;
837 if (!blk_is_available(s->qdev.conf.blk)) {
838 return false;
840 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
841 return nb_sectors <= CD_MAX_SECTORS;
844 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
845 uint8_t *outbuf)
847 uint8_t type = r->req.cmd.buf[1] & 7;
849 if (s->qdev.type != TYPE_ROM) {
850 return -1;
853 /* Types 1/2 are only defined for Blu-Ray. */
854 if (type != 0) {
855 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
856 return -1;
859 memset(outbuf, 0, 34);
860 outbuf[1] = 32;
861 outbuf[2] = 0xe; /* last session complete, disc finalized */
862 outbuf[3] = 1; /* first track on disc */
863 outbuf[4] = 1; /* # of sessions */
864 outbuf[5] = 1; /* first track of last session */
865 outbuf[6] = 1; /* last track of last session */
866 outbuf[7] = 0x20; /* unrestricted use */
867 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
868 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
869 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
870 /* 24-31: disc bar code */
871 /* 32: disc application code */
872 /* 33: number of OPC tables */
874 return 34;
/*
 * Emulate READ DVD STRUCTURE (MMC).  Supports formats 0x00 (physical),
 * 0x01 (copyright), 0x04 (manufacturing) and 0xff (capability list);
 * returns the response size or -1 on error (possibly with sense set).
 */
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    /* Response sizes per supported format code (payload + 4-byte header). */
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        /* Only media type 0 (DVD) is supported. */
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        /* Non-capability formats need actual DVD media present. */
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        /* Sector counts are in DVD 2048-byte blocks (4 x 512). */
        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
971 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
973 uint8_t event_code, media_status;
975 media_status = 0;
976 if (s->tray_open) {
977 media_status = MS_TRAY_OPEN;
978 } else if (blk_is_inserted(s->qdev.conf.blk)) {
979 media_status = MS_MEDIA_PRESENT;
982 /* Event notification descriptor */
983 event_code = MEC_NO_CHANGE;
984 if (media_status != MS_TRAY_OPEN) {
985 if (s->media_event) {
986 event_code = MEC_NEW_MEDIA;
987 s->media_event = false;
988 } else if (s->eject_request) {
989 event_code = MEC_EJECT_REQUESTED;
990 s->eject_request = false;
994 outbuf[0] = event_code;
995 outbuf[1] = media_status;
997 /* These fields are reserved, just clear them. */
998 outbuf[2] = 0;
999 outbuf[3] = 0;
1000 return 4;
/*
 * Emulate GET EVENT STATUS NOTIFICATION (MMC).  Only the polled form
 * and the media event class are supported; returns the response size
 * or -1 for unsupported requests.
 */
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        /* No supported class requested: NEA bit set, empty reply. */
        outbuf[2] = 0x80;
    }
    /* Header's event-data length excludes the 4-byte header itself. */
    stw_be_p(outbuf, size - 4);
    return size;
}
/*
 * Emulate GET CONFIGURATION (MMC): report the current profile
 * (CD/DVD/none) plus the profile-list, core and removable-media
 * feature descriptors.  Returns the fixed 40-byte response size.
 */
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}
1072 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1074 if (s->qdev.type != TYPE_ROM) {
1075 return -1;
1077 memset(outbuf, 0, 8);
1078 outbuf[5] = 1; /* CD-ROM */
1079 return 8;
1082 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1083 int page_control)
1085 static const int mode_sense_valid[0x3f] = {
1086 [MODE_PAGE_VENDOR_SPECIFIC] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1087 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1088 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1089 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1090 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1091 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1092 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1093 [MODE_PAGE_APPLE_VENDOR] = (1 << TYPE_ROM),
1096 uint8_t *p = *p_outbuf + 2;
1097 int length;
1099 assert(page < ARRAY_SIZE(mode_sense_valid));
1100 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1101 return -1;
1105 * If Changeable Values are requested, a mask denoting those mode parameters
1106 * that are changeable shall be returned. As we currently don't support
1107 * parameter changes via MODE_SELECT all bits are returned set to zero.
1108 * The buffer was already menset to zero by the caller of this function.
1110 * The offsets here are off by two compared to the descriptions in the
1111 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1112 * but it is done so that offsets are consistent within our implementation
1113 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1114 * 2-byte and 4-byte headers.
1116 switch (page) {
1117 case MODE_PAGE_HD_GEOMETRY:
1118 length = 0x16;
1119 if (page_control == 1) { /* Changeable Values */
1120 break;
1122 /* if a geometry hint is available, use it */
1123 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1124 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1125 p[2] = s->qdev.conf.cyls & 0xff;
1126 p[3] = s->qdev.conf.heads & 0xff;
1127 /* Write precomp start cylinder, disabled */
1128 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1129 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1130 p[6] = s->qdev.conf.cyls & 0xff;
1131 /* Reduced current start cylinder, disabled */
1132 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1133 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1134 p[9] = s->qdev.conf.cyls & 0xff;
1135 /* Device step rate [ns], 200ns */
1136 p[10] = 0;
1137 p[11] = 200;
1138 /* Landing zone cylinder */
1139 p[12] = 0xff;
1140 p[13] = 0xff;
1141 p[14] = 0xff;
1142 /* Medium rotation rate [rpm], 5400 rpm */
1143 p[18] = (5400 >> 8) & 0xff;
1144 p[19] = 5400 & 0xff;
1145 break;
1147 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1148 length = 0x1e;
1149 if (page_control == 1) { /* Changeable Values */
1150 break;
1152 /* Transfer rate [kbit/s], 5Mbit/s */
1153 p[0] = 5000 >> 8;
1154 p[1] = 5000 & 0xff;
1155 /* if a geometry hint is available, use it */
1156 p[2] = s->qdev.conf.heads & 0xff;
1157 p[3] = s->qdev.conf.secs & 0xff;
1158 p[4] = s->qdev.blocksize >> 8;
1159 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1160 p[7] = s->qdev.conf.cyls & 0xff;
1161 /* Write precomp start cylinder, disabled */
1162 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1163 p[9] = s->qdev.conf.cyls & 0xff;
1164 /* Reduced current start cylinder, disabled */
1165 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1166 p[11] = s->qdev.conf.cyls & 0xff;
1167 /* Device step rate [100us], 100us */
1168 p[12] = 0;
1169 p[13] = 1;
1170 /* Device step pulse width [us], 1us */
1171 p[14] = 1;
1172 /* Device head settle delay [100us], 100us */
1173 p[15] = 0;
1174 p[16] = 1;
1175 /* Motor on delay [0.1s], 0.1s */
1176 p[17] = 1;
1177 /* Motor off delay [0.1s], 0.1s */
1178 p[18] = 1;
1179 /* Medium rotation rate [rpm], 5400 rpm */
1180 p[26] = (5400 >> 8) & 0xff;
1181 p[27] = 5400 & 0xff;
1182 break;
1184 case MODE_PAGE_CACHING:
1185 length = 0x12;
1186 if (page_control == 1 || /* Changeable Values */
1187 blk_enable_write_cache(s->qdev.conf.blk)) {
1188 p[0] = 4; /* WCE */
1190 break;
1192 case MODE_PAGE_R_W_ERROR:
1193 length = 10;
1194 if (page_control == 1) { /* Changeable Values */
1195 if (s->qdev.type == TYPE_ROM) {
1196 /* Automatic Write Reallocation Enabled */
1197 p[0] = 0x80;
1199 break;
1201 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1202 if (s->qdev.type == TYPE_ROM) {
1203 p[1] = 0x20; /* Read Retry Count */
1205 break;
1207 case MODE_PAGE_AUDIO_CTL:
1208 length = 14;
1209 break;
1211 case MODE_PAGE_CAPABILITIES:
1212 length = 0x14;
1213 if (page_control == 1) { /* Changeable Values */
1214 break;
1217 p[0] = 0x3b; /* CD-R & CD-RW read */
1218 p[1] = 0; /* Writing not supported */
1219 p[2] = 0x7f; /* Audio, composite, digital out,
1220 mode 2 form 1&2, multi session */
1221 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1222 RW corrected, C2 errors, ISRC,
1223 UPC, Bar code */
1224 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1225 /* Locking supported, jumper present, eject, tray */
1226 p[5] = 0; /* no volume & mute control, no
1227 changer */
1228 p[6] = (50 * 176) >> 8; /* 50x read speed */
1229 p[7] = (50 * 176) & 0xff;
1230 p[8] = 2 >> 8; /* Two volume levels */
1231 p[9] = 2 & 0xff;
1232 p[10] = 2048 >> 8; /* 2M buffer */
1233 p[11] = 2048 & 0xff;
1234 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1235 p[13] = (16 * 176) & 0xff;
1236 p[16] = (16 * 176) >> 8; /* 16x write speed */
1237 p[17] = (16 * 176) & 0xff;
1238 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1239 p[19] = (16 * 176) & 0xff;
1240 break;
1242 case MODE_PAGE_APPLE_VENDOR:
1243 if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) {
1244 length = 0x1e;
1245 if (page_control == 1) { /* Changeable Values */
1246 break;
1249 memset(p, 0, length);
1250 strcpy((char *)p + 8, "APPLE COMPUTER, INC ");
1251 break;
1252 } else {
1253 return -1;
1256 case MODE_PAGE_VENDOR_SPECIFIC:
1257 if (s->qdev.type == TYPE_DISK && (s->quirks &
1258 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
1259 length = 0x2;
1260 if (page_control == 1) { /* Changeable Values */
1261 p[0] = 0xff;
1262 p[1] = 0xff;
1263 break;
1265 p[0] = 0;
1266 p[1] = 0;
1267 break;
1268 } else {
1269 return -1;
1272 default:
1273 return -1;
1276 assert(length < 256);
1277 (*p_outbuf)[0] = page;
1278 (*p_outbuf)[1] = length;
1279 *p_outbuf += length + 2;
1280 return length + 2;
1283 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1285 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1286 uint64_t nb_sectors;
1287 bool dbd;
1288 int page, buflen, ret, page_control;
1289 uint8_t *p;
1290 uint8_t dev_specific_param;
1292 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1293 page = r->req.cmd.buf[2] & 0x3f;
1294 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1296 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1297 10, page, r->req.cmd.xfer, page_control);
1298 memset(outbuf, 0, r->req.cmd.xfer);
1299 p = outbuf;
1301 if (s->qdev.type == TYPE_DISK) {
1302 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1303 if (!blk_is_writable(s->qdev.conf.blk)) {
1304 dev_specific_param |= 0x80; /* Readonly. */
1306 } else {
1307 if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) {
1308 /* Use DBD from the request... */
1309 dev_specific_param = 0x00;
1312 * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
1313 * which should never return a block descriptor even though DBD is
1314 * not set, otherwise CDROM detection fails in MacOS
1316 if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) &&
1317 page == MODE_PAGE_APPLE_VENDOR) {
1318 dbd = true;
1320 } else {
1322 * MMC prescribes that CD/DVD drives have no block descriptors,
1323 * and defines no device-specific parameter.
1325 dev_specific_param = 0x00;
1326 dbd = true;
1330 if (r->req.cmd.buf[0] == MODE_SENSE) {
1331 p[1] = 0; /* Default media type. */
1332 p[2] = dev_specific_param;
1333 p[3] = 0; /* Block descriptor length. */
1334 p += 4;
1335 } else { /* MODE_SENSE_10 */
1336 p[2] = 0; /* Default media type. */
1337 p[3] = dev_specific_param;
1338 p[6] = p[7] = 0; /* Block descriptor length. */
1339 p += 8;
1342 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1343 if (!dbd && nb_sectors) {
1344 if (r->req.cmd.buf[0] == MODE_SENSE) {
1345 outbuf[3] = 8; /* Block descriptor length */
1346 } else { /* MODE_SENSE_10 */
1347 outbuf[7] = 8; /* Block descriptor length */
1349 nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1350 if (nb_sectors > 0xffffff) {
1351 nb_sectors = 0;
1353 p[0] = 0; /* media density code */
1354 p[1] = (nb_sectors >> 16) & 0xff;
1355 p[2] = (nb_sectors >> 8) & 0xff;
1356 p[3] = nb_sectors & 0xff;
1357 p[4] = 0; /* reserved */
1358 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1359 p[6] = s->qdev.blocksize >> 8;
1360 p[7] = 0;
1361 p += 8;
1364 if (page_control == 3) {
1365 /* Saved Values */
1366 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1367 return -1;
1370 if (page == 0x3f) {
1371 for (page = 0; page <= 0x3e; page++) {
1372 mode_sense_page(s, page, &p, page_control);
1374 } else {
1375 ret = mode_sense_page(s, page, &p, page_control);
1376 if (ret == -1) {
1377 return -1;
1381 buflen = p - outbuf;
1383 * The mode data length field specifies the length in bytes of the
1384 * following data that is available to be transferred. The mode data
1385 * length does not include itself.
1387 if (r->req.cmd.buf[0] == MODE_SENSE) {
1388 outbuf[0] = buflen - 1;
1389 } else { /* MODE_SENSE_10 */
1390 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1391 outbuf[1] = (buflen - 2) & 0xff;
1393 return buflen;
1396 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1398 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1399 int start_track, format, msf, toclen;
1400 uint64_t nb_sectors;
1402 msf = req->cmd.buf[1] & 2;
1403 format = req->cmd.buf[2] & 0xf;
1404 start_track = req->cmd.buf[6];
1405 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1406 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1407 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1408 switch (format) {
1409 case 0:
1410 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1411 break;
1412 case 1:
1413 /* multi session : only a single session defined */
1414 toclen = 12;
1415 memset(outbuf, 0, 12);
1416 outbuf[1] = 0x0a;
1417 outbuf[2] = 0x01;
1418 outbuf[3] = 0x01;
1419 break;
1420 case 2:
1421 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1422 break;
1423 default:
1424 return -1;
1426 return toclen;
1429 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1431 SCSIRequest *req = &r->req;
1432 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1433 bool start = req->cmd.buf[4] & 1;
1434 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1435 int pwrcnd = req->cmd.buf[4] & 0xf0;
1437 if (pwrcnd) {
1438 /* eject/load only happens for power condition == 0 */
1439 return 0;
1442 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1443 if (!start && !s->tray_open && s->tray_locked) {
1444 scsi_check_condition(r,
1445 blk_is_inserted(s->qdev.conf.blk)
1446 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1447 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1448 return -1;
1451 if (s->tray_open != !start) {
1452 blk_eject(s->qdev.conf.blk, !start);
1453 s->tray_open = !start;
1456 return 0;
1459 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1461 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1462 int buflen = r->iov.iov_len;
1464 if (buflen) {
1465 trace_scsi_disk_emulate_read_data(buflen);
1466 r->iov.iov_len = 0;
1467 r->started = true;
1468 scsi_req_data(&r->req, buflen);
1469 return;
1472 /* This also clears the sense buffer for REQUEST SENSE. */
1473 scsi_req_complete(&r->req, GOOD);
1476 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1477 uint8_t *inbuf, int inlen)
1479 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1480 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1481 uint8_t *p;
1482 int len, expected_len, changeable_len, i;
1484 /* The input buffer does not include the page header, so it is
1485 * off by 2 bytes.
1487 expected_len = inlen + 2;
1488 if (expected_len > SCSI_MAX_MODE_LEN) {
1489 return -1;
1492 /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
1493 if (page == MODE_PAGE_ALLS) {
1494 return -1;
1497 p = mode_current;
1498 memset(mode_current, 0, inlen + 2);
1499 len = mode_sense_page(s, page, &p, 0);
1500 if (len < 0 || len != expected_len) {
1501 return -1;
1504 p = mode_changeable;
1505 memset(mode_changeable, 0, inlen + 2);
1506 changeable_len = mode_sense_page(s, page, &p, 1);
1507 assert(changeable_len == len);
1509 /* Check that unchangeable bits are the same as what MODE SENSE
1510 * would return.
1512 for (i = 2; i < len; i++) {
1513 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1514 return -1;
1517 return 0;
1520 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1522 switch (page) {
1523 case MODE_PAGE_CACHING:
1524 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1525 break;
1527 default:
1528 break;
1532 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1534 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1536 while (len > 0) {
1537 int page, subpage, page_len;
1539 /* Parse both possible formats for the mode page headers. */
1540 page = p[0] & 0x3f;
1541 if (p[0] & 0x40) {
1542 if (len < 4) {
1543 goto invalid_param_len;
1545 subpage = p[1];
1546 page_len = lduw_be_p(&p[2]);
1547 p += 4;
1548 len -= 4;
1549 } else {
1550 if (len < 2) {
1551 goto invalid_param_len;
1553 subpage = 0;
1554 page_len = p[1];
1555 p += 2;
1556 len -= 2;
1559 if (subpage) {
1560 goto invalid_param;
1562 if (page_len > len) {
1563 if (!(s->quirks & SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED)) {
1564 goto invalid_param_len;
1566 trace_scsi_disk_mode_select_page_truncated(page, page_len, len);
1569 if (!change) {
1570 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1571 goto invalid_param;
1573 } else {
1574 scsi_disk_apply_mode_select(s, page, p);
1577 p += page_len;
1578 len -= page_len;
1580 return 0;
1582 invalid_param:
1583 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1584 return -1;
1586 invalid_param_len:
1587 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1588 return -1;
1591 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1593 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1594 uint8_t *p = inbuf;
1595 int cmd = r->req.cmd.buf[0];
1596 int len = r->req.cmd.xfer;
1597 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1598 int bd_len, bs;
1599 int pass;
1601 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1602 if (!(s->quirks &
1603 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
1604 /* We only support PF=1, SP=0. */
1605 goto invalid_field;
1609 if (len < hdr_len) {
1610 goto invalid_param_len;
1613 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1614 len -= hdr_len;
1615 p += hdr_len;
1616 if (len < bd_len) {
1617 goto invalid_param_len;
1619 if (bd_len != 0 && bd_len != 8) {
1620 goto invalid_param;
1623 /* Allow changing the block size */
1624 if (bd_len) {
1625 bs = p[5] << 16 | p[6] << 8 | p[7];
1628 * Since the existing code only checks/updates bits 8-15 of the block
1629 * size, restrict ourselves to the same requirement for now to ensure
1630 * that a block size set by a block descriptor and then read back by
1631 * a subsequent SCSI command will be the same
1633 if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
1634 s->qdev.blocksize = bs;
1635 trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
1639 len -= bd_len;
1640 p += bd_len;
1642 /* Ensure no change is made if there is an error! */
1643 for (pass = 0; pass < 2; pass++) {
1644 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1645 assert(pass == 0);
1646 return;
1649 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1650 /* The request is used as the AIO opaque value, so add a ref. */
1651 scsi_req_ref(&r->req);
1652 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1653 BLOCK_ACCT_FLUSH);
1654 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1655 return;
1658 scsi_req_complete(&r->req, GOOD);
1659 return;
1661 invalid_param:
1662 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1663 return;
1665 invalid_param_len:
1666 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1667 return;
1669 invalid_field:
1670 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1673 /* sector_num and nb_sectors expected to be in qdev blocksize */
1674 static inline bool check_lba_range(SCSIDiskState *s,
1675 uint64_t sector_num, uint32_t nb_sectors)
1678 * The first line tests that no overflow happens when computing the last
1679 * sector. The second line tests that the last accessed sector is in
1680 * range.
1682 * Careful, the computations should not underflow for nb_sectors == 0,
1683 * and a 0-block read to the first LBA beyond the end of device is
1684 * valid.
1686 return (sector_num <= sector_num + nb_sectors &&
1687 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1690 typedef struct UnmapCBData {
1691 SCSIDiskReq *r;
1692 uint8_t *inbuf;
1693 int count;
1694 } UnmapCBData;
1696 static void scsi_unmap_complete(void *opaque, int ret);
1698 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1700 SCSIDiskReq *r = data->r;
1701 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1703 assert(r->req.aiocb == NULL);
1705 if (data->count > 0) {
1706 uint64_t sector_num = ldq_be_p(&data->inbuf[0]);
1707 uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1708 r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1709 r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1711 if (!check_lba_range(s, sector_num, nb_sectors)) {
1712 block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
1713 BLOCK_ACCT_UNMAP);
1714 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1715 goto done;
1718 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1719 r->sector_count * BDRV_SECTOR_SIZE,
1720 BLOCK_ACCT_UNMAP);
1722 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1723 r->sector * BDRV_SECTOR_SIZE,
1724 r->sector_count * BDRV_SECTOR_SIZE,
1725 scsi_unmap_complete, data);
1726 data->count--;
1727 data->inbuf += 16;
1728 return;
1731 scsi_req_complete(&r->req, GOOD);
1733 done:
1734 scsi_req_unref(&r->req);
1735 g_free(data);
1738 static void scsi_unmap_complete(void *opaque, int ret)
1740 UnmapCBData *data = opaque;
1741 SCSIDiskReq *r = data->r;
1742 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1744 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1746 assert(r->req.aiocb != NULL);
1747 r->req.aiocb = NULL;
1749 if (scsi_disk_req_check_error(r, ret, true)) {
1750 scsi_req_unref(&r->req);
1751 g_free(data);
1752 } else {
1753 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1754 scsi_unmap_complete_noio(data, ret);
1756 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1759 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1761 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1762 uint8_t *p = inbuf;
1763 int len = r->req.cmd.xfer;
1764 UnmapCBData *data;
1766 /* Reject ANCHOR=1. */
1767 if (r->req.cmd.buf[1] & 0x1) {
1768 goto invalid_field;
1771 if (len < 8) {
1772 goto invalid_param_len;
1774 if (len < lduw_be_p(&p[0]) + 2) {
1775 goto invalid_param_len;
1777 if (len < lduw_be_p(&p[2]) + 8) {
1778 goto invalid_param_len;
1780 if (lduw_be_p(&p[2]) & 15) {
1781 goto invalid_param_len;
1784 if (!blk_is_writable(s->qdev.conf.blk)) {
1785 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1786 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1787 return;
1790 data = g_new0(UnmapCBData, 1);
1791 data->r = r;
1792 data->inbuf = &p[8];
1793 data->count = lduw_be_p(&p[2]) >> 4;
1795 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1796 scsi_req_ref(&r->req);
1797 scsi_unmap_complete_noio(data, 0);
1798 return;
1800 invalid_param_len:
1801 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1802 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1803 return;
1805 invalid_field:
1806 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1807 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1810 typedef struct WriteSameCBData {
1811 SCSIDiskReq *r;
1812 int64_t sector;
1813 int nb_sectors;
1814 QEMUIOVector qiov;
1815 struct iovec iov;
1816 } WriteSameCBData;
1818 static void scsi_write_same_complete(void *opaque, int ret)
1820 WriteSameCBData *data = opaque;
1821 SCSIDiskReq *r = data->r;
1822 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1824 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1826 assert(r->req.aiocb != NULL);
1827 r->req.aiocb = NULL;
1829 if (scsi_disk_req_check_error(r, ret, true)) {
1830 goto done;
1833 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1835 data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
1836 data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
1837 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1838 data->iov.iov_len);
1839 if (data->iov.iov_len) {
1840 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1841 data->iov.iov_len, BLOCK_ACCT_WRITE);
1842 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1843 * where final qiov may need smaller size */
1844 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1845 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1846 data->sector << BDRV_SECTOR_BITS,
1847 &data->qiov, 0,
1848 scsi_write_same_complete, data);
1849 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1850 return;
1853 scsi_req_complete(&r->req, GOOD);
1855 done:
1856 scsi_req_unref(&r->req);
1857 qemu_vfree(data->iov.iov_base);
1858 g_free(data);
1859 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1862 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1864 SCSIRequest *req = &r->req;
1865 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1866 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1867 WriteSameCBData *data;
1868 uint8_t *buf;
1869 int i, l;
1871 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1872 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1873 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1874 return;
1877 if (!blk_is_writable(s->qdev.conf.blk)) {
1878 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1879 return;
1881 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1882 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1883 return;
1886 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1887 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1889 /* The request is used as the AIO opaque value, so add a ref. */
1890 scsi_req_ref(&r->req);
1891 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1892 nb_sectors * s->qdev.blocksize,
1893 BLOCK_ACCT_WRITE);
1894 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1895 r->req.cmd.lba * s->qdev.blocksize,
1896 nb_sectors * s->qdev.blocksize,
1897 flags, scsi_aio_complete, r);
1898 return;
1901 data = g_new0(WriteSameCBData, 1);
1902 data->r = r;
1903 data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1904 data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1905 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1906 SCSI_WRITE_SAME_MAX);
1907 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1908 data->iov.iov_len);
1909 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1911 for (i = 0; i < data->iov.iov_len; i += l) {
1912 l = MIN(s->qdev.blocksize, data->iov.iov_len - i);
1913 memcpy(&buf[i], inbuf, l);
1916 scsi_req_ref(&r->req);
1917 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1918 data->iov.iov_len, BLOCK_ACCT_WRITE);
1919 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1920 data->sector << BDRV_SECTOR_BITS,
1921 &data->qiov, 0,
1922 scsi_write_same_complete, data);
1925 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1927 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1929 if (r->iov.iov_len) {
1930 int buflen = r->iov.iov_len;
1931 trace_scsi_disk_emulate_write_data(buflen);
1932 r->iov.iov_len = 0;
1933 scsi_req_data(&r->req, buflen);
1934 return;
1937 switch (req->cmd.buf[0]) {
1938 case MODE_SELECT:
1939 case MODE_SELECT_10:
1940 /* This also clears the sense buffer for REQUEST SENSE. */
1941 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1942 break;
1944 case UNMAP:
1945 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1946 break;
1948 case VERIFY_10:
1949 case VERIFY_12:
1950 case VERIFY_16:
1951 if (r->req.status == -1) {
1952 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1954 break;
1956 case WRITE_SAME_10:
1957 case WRITE_SAME_16:
1958 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1959 break;
1961 default:
1962 abort();
1966 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1968 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1969 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1970 uint64_t nb_sectors;
1971 uint8_t *outbuf;
1972 int buflen;
1974 switch (req->cmd.buf[0]) {
1975 case INQUIRY:
1976 case MODE_SENSE:
1977 case MODE_SENSE_10:
1978 case RESERVE:
1979 case RESERVE_10:
1980 case RELEASE:
1981 case RELEASE_10:
1982 case START_STOP:
1983 case ALLOW_MEDIUM_REMOVAL:
1984 case GET_CONFIGURATION:
1985 case GET_EVENT_STATUS_NOTIFICATION:
1986 case MECHANISM_STATUS:
1987 case REQUEST_SENSE:
1988 break;
1990 default:
1991 if (!blk_is_available(s->qdev.conf.blk)) {
1992 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1993 return 0;
1995 break;
1999 * FIXME: we shouldn't return anything bigger than 4k, but the code
2000 * requires the buffer to be as big as req->cmd.xfer in several
2001 * places. So, do not allow CDBs with a very large ALLOCATION
2002 * LENGTH. The real fix would be to modify scsi_read_data and
2003 * dma_buf_read, so that they return data beyond the buflen
2004 * as all zeros.
2006 if (req->cmd.xfer > 65536) {
2007 goto illegal_request;
2009 r->buflen = MAX(4096, req->cmd.xfer);
2011 if (!r->iov.iov_base) {
2012 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
2015 outbuf = r->iov.iov_base;
2016 memset(outbuf, 0, r->buflen);
2017 switch (req->cmd.buf[0]) {
2018 case TEST_UNIT_READY:
2019 assert(blk_is_available(s->qdev.conf.blk));
2020 break;
2021 case INQUIRY:
2022 buflen = scsi_disk_emulate_inquiry(req, outbuf);
2023 if (buflen < 0) {
2024 goto illegal_request;
2026 break;
2027 case MODE_SENSE:
2028 case MODE_SENSE_10:
2029 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
2030 if (buflen < 0) {
2031 goto illegal_request;
2033 break;
2034 case READ_TOC:
2035 buflen = scsi_disk_emulate_read_toc(req, outbuf);
2036 if (buflen < 0) {
2037 goto illegal_request;
2039 break;
2040 case RESERVE:
2041 if (req->cmd.buf[1] & 1) {
2042 goto illegal_request;
2044 break;
2045 case RESERVE_10:
2046 if (req->cmd.buf[1] & 3) {
2047 goto illegal_request;
2049 break;
2050 case RELEASE:
2051 if (req->cmd.buf[1] & 1) {
2052 goto illegal_request;
2054 break;
2055 case RELEASE_10:
2056 if (req->cmd.buf[1] & 3) {
2057 goto illegal_request;
2059 break;
2060 case START_STOP:
2061 if (scsi_disk_emulate_start_stop(r) < 0) {
2062 return 0;
2064 break;
2065 case ALLOW_MEDIUM_REMOVAL:
2066 s->tray_locked = req->cmd.buf[4] & 1;
2067 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
2068 break;
2069 case READ_CAPACITY_10:
2070 /* The normal LEN field for this command is zero. */
2071 memset(outbuf, 0, 8);
2072 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2073 if (!nb_sectors) {
2074 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2075 return 0;
2077 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
2078 goto illegal_request;
2080 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2081 /* Returned value is the address of the last sector. */
2082 nb_sectors--;
2083 /* Remember the new size for read/write sanity checking. */
2084 s->qdev.max_lba = nb_sectors;
2085 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
2086 if (nb_sectors > UINT32_MAX) {
2087 nb_sectors = UINT32_MAX;
2089 outbuf[0] = (nb_sectors >> 24) & 0xff;
2090 outbuf[1] = (nb_sectors >> 16) & 0xff;
2091 outbuf[2] = (nb_sectors >> 8) & 0xff;
2092 outbuf[3] = nb_sectors & 0xff;
2093 outbuf[4] = 0;
2094 outbuf[5] = 0;
2095 outbuf[6] = s->qdev.blocksize >> 8;
2096 outbuf[7] = 0;
2097 break;
2098 case REQUEST_SENSE:
2099 /* Just return "NO SENSE". */
2100 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
2101 (req->cmd.buf[1] & 1) == 0);
2102 if (buflen < 0) {
2103 goto illegal_request;
2105 break;
2106 case MECHANISM_STATUS:
2107 buflen = scsi_emulate_mechanism_status(s, outbuf);
2108 if (buflen < 0) {
2109 goto illegal_request;
2111 break;
2112 case GET_CONFIGURATION:
2113 buflen = scsi_get_configuration(s, outbuf);
2114 if (buflen < 0) {
2115 goto illegal_request;
2117 break;
2118 case GET_EVENT_STATUS_NOTIFICATION:
2119 buflen = scsi_get_event_status_notification(s, r, outbuf);
2120 if (buflen < 0) {
2121 goto illegal_request;
2123 break;
2124 case READ_DISC_INFORMATION:
2125 buflen = scsi_read_disc_information(s, r, outbuf);
2126 if (buflen < 0) {
2127 goto illegal_request;
2129 break;
2130 case READ_DVD_STRUCTURE:
2131 buflen = scsi_read_dvd_structure(s, r, outbuf);
2132 if (buflen < 0) {
2133 goto illegal_request;
2135 break;
2136 case SERVICE_ACTION_IN_16:
2137 /* Service Action In subcommands. */
2138 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2139 trace_scsi_disk_emulate_command_SAI_16();
2140 memset(outbuf, 0, req->cmd.xfer);
2141 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2142 if (!nb_sectors) {
2143 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2144 return 0;
2146 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2147 goto illegal_request;
2149 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2150 /* Returned value is the address of the last sector. */
2151 nb_sectors--;
2152 /* Remember the new size for read/write sanity checking. */
2153 s->qdev.max_lba = nb_sectors;
2154 outbuf[0] = (nb_sectors >> 56) & 0xff;
2155 outbuf[1] = (nb_sectors >> 48) & 0xff;
2156 outbuf[2] = (nb_sectors >> 40) & 0xff;
2157 outbuf[3] = (nb_sectors >> 32) & 0xff;
2158 outbuf[4] = (nb_sectors >> 24) & 0xff;
2159 outbuf[5] = (nb_sectors >> 16) & 0xff;
2160 outbuf[6] = (nb_sectors >> 8) & 0xff;
2161 outbuf[7] = nb_sectors & 0xff;
2162 outbuf[8] = 0;
2163 outbuf[9] = 0;
2164 outbuf[10] = s->qdev.blocksize >> 8;
2165 outbuf[11] = 0;
2166 outbuf[12] = 0;
2167 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2169 /* set TPE bit if the format supports discard */
2170 if (s->qdev.conf.discard_granularity) {
2171 outbuf[14] = 0x80;
2174 /* Protection, exponent and lowest lba field left blank. */
2175 break;
2177 trace_scsi_disk_emulate_command_SAI_unsupported();
2178 goto illegal_request;
2179 case SYNCHRONIZE_CACHE:
2180 /* The request is used as the AIO opaque value, so add a ref. */
2181 scsi_req_ref(&r->req);
2182 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2183 BLOCK_ACCT_FLUSH);
2184 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2185 return 0;
2186 case SEEK_10:
2187 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2188 if (r->req.cmd.lba > s->qdev.max_lba) {
2189 goto illegal_lba;
2191 break;
2192 case MODE_SELECT:
2193 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2194 break;
2195 case MODE_SELECT_10:
2196 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2197 break;
2198 case UNMAP:
2199 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2200 break;
2201 case VERIFY_10:
2202 case VERIFY_12:
2203 case VERIFY_16:
2204 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2205 if (req->cmd.buf[1] & 6) {
2206 goto illegal_request;
2208 break;
2209 case WRITE_SAME_10:
2210 case WRITE_SAME_16:
2211 trace_scsi_disk_emulate_command_WRITE_SAME(
2212 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2213 break;
2214 case FORMAT_UNIT:
2215 trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer);
2216 break;
2217 default:
2218 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2219 scsi_command_name(buf[0]));
2220 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2221 return 0;
2223 assert(!r->req.aiocb);
2224 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2225 if (r->iov.iov_len == 0) {
2226 scsi_req_complete(&r->req, GOOD);
2228 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2229 assert(r->iov.iov_len == req->cmd.xfer);
2230 return -r->iov.iov_len;
2231 } else {
2232 return r->iov.iov_len;
2235 illegal_request:
2236 if (r->req.status == -1) {
2237 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2239 return 0;
2241 illegal_lba:
2242 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2243 return 0;
/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    /* No medium present: fail every DMA command up front. */
    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    /* Transfer length in logical blocks, decoded from the CDB. */
    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        /* Convert from logical blocks to 512-byte BDRV sectors. */
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        trace_scsi_disk_dma_command_WRITE(
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        /* Dispatch table guarantees only the opcodes above reach here. */
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        /* Zero-length transfer completes immediately with GOOD status. */
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}
/*
 * Device reset handler: cancel all in-flight requests with RESET sense,
 * re-read the backend geometry to refresh max_lba, and clear tray state.
 */
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;
    AioContext *ctx;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    /* blk_get_geometry must run in the backend's AioContext. */
    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    aio_context_release(ctx);

    /* Convert 512-byte sectors to logical blocks; max_lba is the last
     * addressable block, hence the decrement. */
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    /* Forget any SCSI version snooped from the guest's INQUIRYs. */
    s->qdev.scsi_version = s->qdev.default_scsi_version;
}
2363 static void scsi_disk_drained_begin(void *opaque)
2365 SCSIDiskState *s = opaque;
2367 scsi_device_drained_begin(&s->qdev);
2370 static void scsi_disk_drained_end(void *opaque)
2372 SCSIDiskState *s = opaque;
2374 scsi_device_drained_end(&s->qdev);
2377 static void scsi_disk_resize_cb(void *opaque)
2379 SCSIDiskState *s = opaque;
2381 /* SPC lists this sense code as available only for
2382 * direct-access devices.
2384 if (s->qdev.type == TYPE_DISK) {
2385 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
/* Backend media-change callback for CD-ROMs. */
static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}
2410 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2412 SCSIDiskState *s = opaque;
2414 s->eject_request = true;
2415 if (force) {
2416 s->tray_locked = false;
2420 static bool scsi_cd_is_tray_open(void *opaque)
2422 return ((SCSIDiskState *)opaque)->tray_open;
2425 static bool scsi_cd_is_medium_locked(void *opaque)
2427 return ((SCSIDiskState *)opaque)->tray_locked;
/*
 * Block-backend callbacks for removable devices (scsi-cd, removable
 * scsi-hd): tray/media handling plus drain and resize notifications.
 */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .drained_begin = scsi_disk_drained_begin,
    .drained_end = scsi_disk_drained_end,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_medium_locked = scsi_cd_is_medium_locked,
    .is_tray_open = scsi_cd_is_tray_open,
    .resize_cb = scsi_disk_resize_cb,
};

/* Block-backend callbacks for fixed devices: no tray/media handling. */
static const BlockDevOps scsi_disk_block_ops = {
    .drained_begin = scsi_disk_drained_begin,
    .drained_end = scsi_disk_drained_end,
    .resize_cb = scsi_disk_resize_cb,
};
2446 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2448 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2449 if (s->media_changed) {
2450 s->media_changed = false;
2451 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
/*
 * Common realize step shared by scsi-hd, scsi-cd and scsi-block:
 * validates the backend, fills in default identification strings,
 * and wires the device up to the block layer.
 */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    bool read_only;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Non-removable devices must have media at realize time. */
    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
        return;
    }

    /* A backend in an iothread requires an HBA that can handle it. */
    if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
        !s->qdev.hba_supports_iothread)
    {
        error_setg(errp, "HBA does not support iothreads");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }

    /* CD-ROMs are always read-only regardless of backend permissions. */
    read_only = !blk_supports_write_perm(s->qdev.conf.blk);
    if (dev->type == TYPE_ROM) {
        read_only = true;
    }

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    /* Default the identification strings the guest sees via INQUIRY. */
    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }
    if (!s->device_id) {
        if (s->serial) {
            s->device_id = g_strdup_printf("%.20s", s->serial);
        } else {
            const char *str = blk_name(s->qdev.conf.blk);
            if (str && *str) {
                s->device_id = g_strdup(str);
            }
        }
    }

    /* SG character devices belong to scsi-generic, not this model. */
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }

    blk_iostatus_enable(s->qdev.conf.blk);

    add_boot_device_lchs(&dev->qdev, NULL,
                         dev->conf.lcyls,
                         dev->conf.lheads,
                         dev->conf.lsecs);
}
/* Undo the boot-device registration performed in scsi_realize. */
static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}
/* Realize handler for the scsi-hd device type. */
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx = NULL;
    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        ctx = blk_get_aio_context(s->qdev.conf.blk);
        aio_context_acquire(ctx);
        if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
            goto out;
        }
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
out:
    /* ctx is only non-NULL (and acquired) when a backend was present. */
    if (ctx) {
        aio_context_release(ctx);
    }
}
/* Realize handler for the scsi-cd device type. */
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int ret;
    uint32_t blocksize = 2048;   /* standard CD-ROM sector size */

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    if (dev->conf.physical_block_size != 0) {
        blocksize = dev->conf.physical_block_size;
    }

    ctx = blk_get_aio_context(dev->conf.blk);
    aio_context_acquire(ctx);
    s->qdev.blocksize = blocksize;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
    aio_context_release(ctx);
}
/* Request ops for commands that are fully emulated in this file. */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

/* Request ops for READ/WRITE commands that go through the DMA helpers. */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

/*
 * Opcode -> request ops dispatch table.  Unlisted opcodes fall back to
 * scsi_disk_emulate_reqops in scsi_new_request.
 */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,
    [FORMAT_UNIT]                   = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};
2664 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
2666 int i;
2667 int len = scsi_cdb_length(buf);
2668 char *line_buffer, *p;
2670 assert(len > 0 && len <= 16);
2671 line_buffer = g_malloc(len * 5 + 1);
2673 for (i = 0, p = line_buffer; i < len; i++) {
2674 p += sprintf(p, " 0x%02x", buf[i]);
2676 trace_scsi_disk_new_request(lun, tag, line_buffer);
2678 g_free(line_buffer);
2681 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2682 uint8_t *buf, void *hba_private)
2684 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2685 SCSIRequest *req;
2686 const SCSIReqOps *ops;
2687 uint8_t command;
2689 command = buf[0];
2690 ops = scsi_disk_reqops_dispatch[command];
2691 if (!ops) {
2692 ops = &scsi_disk_emulate_reqops;
2694 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2696 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2697 scsi_disk_new_request_dump(lun, tag, buf);
2700 return req;
2703 #ifdef __linux__
2704 static int get_device_type(SCSIDiskState *s)
2706 uint8_t cmd[16];
2707 uint8_t buf[36];
2708 int ret;
2710 memset(cmd, 0, sizeof(cmd));
2711 memset(buf, 0, sizeof(buf));
2712 cmd[0] = INQUIRY;
2713 cmd[4] = sizeof(buf);
2715 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2716 buf, sizeof(buf), s->qdev.io_timeout);
2717 if (ret < 0) {
2718 return -1;
2720 s->qdev.type = buf[0];
2721 if (buf[1] & 0x80) {
2722 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2724 return 0;
/* Realize handler for the scsi-block passthrough device type. */
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size, we'll fix it when the guest sends.
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}
/* Per-request state for the scsi-block SG_IO DMA path. */
typedef struct SCSIBlockReq {
    SCSIDiskReq req;                 /* must be first: upcast target */
    sg_io_hdr_t io_header;           /* header passed to the SG_IO ioctl */

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];

    /* Completion of the SG_IO aiocb, and its argument. */
    BlockCompletionFunc *cb;
    void *cb_opaque;
} SCSIBlockReq;
/*
 * Completion callback for SG_IO aiocbs issued by scsi_block_do_sgio.
 * Translates transport/driver/device status into either a completed
 * request or a call into the rerror/werror handling, then chains to
 * the stored DMA completion callback.
 */
static void scsi_block_sgio_complete(void *opaque, int ret)
{
    SCSIBlockReq *req = (SCSIBlockReq *)opaque;
    SCSIDiskReq *r = &req->req;
    SCSIDevice *s = r->req.dev;
    sg_io_hdr_t *io_hdr = &req->io_header;

    if (ret == 0) {
        /* Host (transport) errors complete the request immediately. */
        if (io_hdr->host_status != SCSI_HOST_OK) {
            scsi_req_complete_failed(&r->req, io_hdr->host_status);
            scsi_req_unref(&r->req);
            return;
        }

        /* A driver timeout is reported to the guest as BUSY. */
        if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
            ret = BUSY;
        } else {
            ret = io_hdr->status;
        }

        if (ret > 0) {
            /* Nonzero SCSI status: run the rerror/werror policy. */
            aio_context_acquire(blk_get_aio_context(s->conf.blk));
            if (scsi_handle_rw_error(r, ret, true)) {
                aio_context_release(blk_get_aio_context(s->conf.blk));
                scsi_req_unref(&r->req);
                return;
            }
            aio_context_release(blk_get_aio_context(s->conf.blk));

            /* Ignore error.  */
            ret = 0;
        }
    }

    req->cb(req->cb_opaque, ret);
}
/*
 * Submit one SG_IO transfer for a (possibly split) segment of the
 * guest's READ/WRITE/VERIFY command.  A fresh CDB is rebuilt for each
 * segment because the DMA helpers may split the original transfer.
 */
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = s->qdev.io_timeout * 1000;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;
    req->cb = cb;
    req->cb_opaque = opaque;
    trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
                                     nb_logical_blocks, io_header->timeout);
    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req);
    assert(aiocb != NULL);
    return aiocb;
}
/*
 * scsi-block never emulates FUA: the FUA bit is passed through in the
 * rebuilt CDB, so the real device honours it.
 */
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}
2933 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2934 QEMUIOVector *iov,
2935 BlockCompletionFunc *cb, void *cb_opaque,
2936 void *opaque)
2938 SCSIBlockReq *r = opaque;
2939 return scsi_block_do_sgio(r, offset, iov,
2940 SG_DXFER_FROM_DEV, cb, cb_opaque);
2943 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2944 QEMUIOVector *iov,
2945 BlockCompletionFunc *cb, void *cb_opaque,
2946 void *opaque)
2948 SCSIBlockReq *r = opaque;
2949 return scsi_block_do_sgio(r, offset, iov,
2950 SG_DXFER_TO_DEV, cb, cb_opaque);
2953 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2955 switch (buf[0]) {
2956 case VERIFY_10:
2957 case VERIFY_12:
2958 case VERIFY_16:
2959 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2960 * for the number of logical blocks specified in the length
2961 * field). For other modes, do not use scatter/gather operation.
2963 if ((buf[1] & 6) == 2) {
2964 return false;
2966 break;
2968 case READ_6:
2969 case READ_10:
2970 case READ_12:
2971 case READ_16:
2972 case WRITE_6:
2973 case WRITE_10:
2974 case WRITE_12:
2975 case WRITE_16:
2976 case WRITE_VERIFY_10:
2977 case WRITE_VERIFY_12:
2978 case WRITE_VERIFY_16:
2979 /* MMC writing cannot be done via DMA helpers, because it sometimes
2980 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2981 * We might use scsi_block_dma_reqops as long as no writing commands are
2982 * seen, but performance usually isn't paramount on optical media. So,
2983 * just make scsi-block operate the same as scsi-generic for them.
2985 if (s->qdev.type != TYPE_ROM) {
2986 return false;
2988 break;
2990 default:
2991 break;
2994 return true;
/*
 * send_command hook for the scsi-block DMA path: stash the CDB bytes
 * that scsi_block_do_sgio needs to rebuild per-segment CDBs, then
 * defer to the common scsi_disk_dma_command.
 */
static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
    /* The group number field lives at a size-dependent offset. */
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 5:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    return scsi_disk_dma_command(req, buf);
}
/* Request ops for the scsi-block DMA (SG_IO scatter/gather) path. */
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
3051 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
3052 uint32_t lun, uint8_t *buf,
3053 void *hba_private)
3055 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
3057 if (scsi_block_is_passthrough(s, buf)) {
3058 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
3059 hba_private);
3060 } else {
3061 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
3062 hba_private);
3066 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
3067 uint8_t *buf, size_t buf_len,
3068 void *hba_private)
3070 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
3072 if (scsi_block_is_passthrough(s, buf)) {
3073 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private);
3074 } else {
3075 return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len);
/* Copy the sense length reported by SG_IO into the request. */
static void scsi_block_update_sense(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
    r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
}
3085 #endif
3087 static
3088 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
3089 BlockCompletionFunc *cb, void *cb_opaque,
3090 void *opaque)
3092 SCSIDiskReq *r = opaque;
3093 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3094 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
3097 static
3098 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
3099 BlockCompletionFunc *cb, void *cb_opaque,
3100 void *opaque)
3102 SCSIDiskReq *r = opaque;
3103 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3104 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
/* Class init for the abstract base of scsi-hd, scsi-cd and scsi-block. */
static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    /* Subclasses (scsi-block) may override these hooks. */
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name = TYPE_SCSI_DISK_BASE,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size = sizeof(SCSIDiskClass),
    .abstract = true,
};

/* Properties shared by the scsi-hd and scsi-cd device types. */
#define DEFINE_SCSI_DISK_PROPERTIES()                                   \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk),  \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),             \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),            \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),                  \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),                \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),                \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),              \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    /* NOTE(review): default reconstructed as 5 (SPC-3) — the value line was
     * lost in extraction; confirm against upstream. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

/* Migration state shared by all scsi-disk variants. */
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

/* Class init for the scsi-hd device type. */
static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_hd_realize;
    sc->unrealize = scsi_unrealize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    device_class_set_props(dc, scsi_hd_properties);
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name = "scsi-hd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_hd_class_initfn,
};
static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* NOTE(review): default reconstructed as 5 (SPC-3) — the value line was
     * lost in extraction; confirm against upstream. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0),
    DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
    DEFINE_PROP_END_OF_LIST(),
};

/* Class init for the scsi-cd device type. */
static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_cd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    device_class_set_props(dc, scsi_cd_properties);
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name = "scsi-cd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_cd_class_initfn,
};
#ifdef __linux__
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* -1: take the SCSI version reported by the real device. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

/* Class init for the scsi-block passthrough device type. */
static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize = scsi_block_realize;
    sc->alloc_req = scsi_block_new_request;
    sc->parse_cdb = scsi_block_parse_cdb;
    /* Override the base-class DMA hooks to go through SG_IO. */
    sdc->dma_readv = scsi_block_dma_readv;
    sdc->dma_writev = scsi_block_dma_writev;
    sdc->update_sense = scsi_block_update_sense;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    device_class_set_props(dc, scsi_block_properties);
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name = "scsi-block",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_block_class_initfn,
};
#endif
/* Register all scsi-disk QOM types (scsi-block only on Linux hosts). */
static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
}

type_init(scsi_disk_register_types)