scsi-disk: move scsi_handle_rw_error earlier
hw/scsi/scsi-disk.c
1 /*
2 * SCSI Device emulation
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
7 * Written by Paul Brook
8 * Modifications:
10 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
15 * This code is licensed under the LGPL.
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "hw/scsi/scsi.h"
29 #include "migration/qemu-file-types.h"
30 #include "migration/vmstate.h"
31 #include "hw/scsi/emulation.h"
32 #include "scsi/constants.h"
33 #include "sysemu/block-backend.h"
34 #include "sysemu/blockdev.h"
35 #include "hw/block/block.h"
36 #include "hw/qdev-properties.h"
37 #include "hw/qdev-properties-system.h"
38 #include "sysemu/dma.h"
39 #include "sysemu/sysemu.h"
40 #include "qemu/cutils.h"
41 #include "trace.h"
42 #include "qom/object.h"
44 #ifdef __linux__
45 #include <scsi/sg.h>
46 #endif
48 #define SCSI_WRITE_SAME_MAX (512 * KiB)
49 #define SCSI_DMA_BUF_SIZE (128 * KiB)
50 #define SCSI_MAX_INQUIRY_LEN 256
51 #define SCSI_MAX_MODE_LEN 256
53 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
54 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
55 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
57 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
59 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
61 struct SCSIDiskClass {
62 SCSIDeviceClass parent_class;
63 DMAIOFunc *dma_readv;
64 DMAIOFunc *dma_writev;
65 bool (*need_fua_emulation)(SCSICommand *cmd);
66 void (*update_sense)(SCSIRequest *r);
69 typedef struct SCSIDiskReq {
70 SCSIRequest req;
71 /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
72 uint64_t sector;
73 uint32_t sector_count;
74 uint32_t buflen;
75 bool started;
76 bool need_fua_emulation;
77 struct iovec iov;
78 QEMUIOVector qiov;
79 BlockAcctCookie acct;
80 unsigned char *status;
81 } SCSIDiskReq;
83 #define SCSI_DISK_F_REMOVABLE 0
84 #define SCSI_DISK_F_DPOFUA 1
85 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
87 struct SCSIDiskState {
88 SCSIDevice qdev;
89 uint32_t features;
90 bool media_changed;
91 bool media_event;
92 bool eject_request;
93 uint16_t port_index;
94 uint64_t max_unmap_size;
95 uint64_t max_io_size;
96 QEMUBH *bh;
97 char *version;
98 char *serial;
99 char *vendor;
100 char *product;
101 char *device_id;
102 bool tray_open;
103 bool tray_locked;
105 * 0x0000 - rotation rate not reported
106 * 0x0001 - non-rotating medium (SSD)
107 * 0x0002-0x0400 - reserved
108 * 0x0401-0xfffe - rotations per minute
109 * 0xffff - reserved
111 uint16_t rotation_rate;
114 static void scsi_free_request(SCSIRequest *req)
116 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
118 qemu_vfree(r->iov.iov_base);
121 /* Helper function for command completion with sense. */
122 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
124 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
125 sense.ascq);
126 scsi_req_build_sense(&r->req, sense);
127 scsi_req_complete(&r->req, CHECK_CONDITION);
130 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
132 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
134 if (!r->iov.iov_base) {
135 r->buflen = size;
136 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
138 r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
139 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
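/* Migration support (a summary of the two helpers below): the in-flight request
 * state (sector, sector_count, buflen) is serialised on save and restored on load.
 * For host-to-device transfers the data buffer is always saved; for device-to-host
 * transfers it is only saved when the request is not simply retried after migration. */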
142 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
144 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
146 qemu_put_be64s(f, &r->sector);
147 qemu_put_be32s(f, &r->sector_count);
148 qemu_put_be32s(f, &r->buflen);
149 if (r->buflen) {
150 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
151 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
152 } else if (!req->retry) {
153 uint32_t len = r->iov.iov_len;
154 qemu_put_be32s(f, &len);
155 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
160 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
162 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
164 qemu_get_be64s(f, &r->sector);
165 qemu_get_be32s(f, &r->sector_count);
166 qemu_get_be32s(f, &r->buflen);
167 if (r->buflen) {
168 scsi_init_iovec(r, r->buflen);
169 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
170 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
171 } else if (!r->req.retry) {
172 uint32_t len;
173 qemu_get_be32s(f, &len);
174 r->iov.iov_len = len;
175 assert(r->iov.iov_len <= r->buflen);
176 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
180 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
184 * scsi_handle_rw_error has two return values. False means that the error
185 * must be ignored, true means that the error has been processed and the
186 * caller should not do anything else for this request. Note that
187 * scsi_handle_rw_error always manages its reference counts, independent
188 * of the return value.
190 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
192 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
193 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
194 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
195 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
196 is_read, error);
198 if (action == BLOCK_ERROR_ACTION_REPORT) {
199 if (acct_failed) {
200 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
202 switch (error) {
203 case 0:
204 /* A passthrough command has run and has produced sense data; check
205 * whether the error has to be handled by the guest or should rather
206 * pause the host.
208 assert(r->status && *r->status);
209 if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
210 /* These errors are handled by guest. */
211 sdc->update_sense(&r->req);
212 scsi_req_complete(&r->req, *r->status);
213 return true;
215 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
216 break;
217 #ifdef CONFIG_LINUX
218 /* These errno mappings are specific to Linux. For more information:
219 * - scsi_decide_disposition in drivers/scsi/scsi_error.c
220 * - scsi_result_to_blk_status in drivers/scsi/scsi_lib.c
221 * - blk_errors[] in block/blk-core.c
223 case EBADE:
224 /* DID_NEXUS_FAILURE -> BLK_STS_NEXUS. */
225 scsi_req_complete(&r->req, RESERVATION_CONFLICT);
226 break;
227 case ENODATA:
228 /* DID_MEDIUM_ERROR -> BLK_STS_MEDIUM. */
229 scsi_check_condition(r, SENSE_CODE(READ_ERROR));
230 break;
231 case EREMOTEIO:
232 /* DID_TARGET_FAILURE -> BLK_STS_TARGET. */
233 scsi_req_complete(&r->req, HARDWARE_ERROR);
234 break;
235 #endif
236 case ENOMEDIUM:
237 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
238 break;
239 case ENOMEM:
240 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
241 break;
242 case EINVAL:
243 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
244 break;
245 case ENOSPC:
246 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
247 break;
248 default:
249 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
250 break;
254 blk_error_action(s->qdev.conf.blk, action, is_read, error);
255 if (action == BLOCK_ERROR_ACTION_IGNORE) {
256 scsi_req_complete(&r->req, 0);
257 return true;
260 if (action == BLOCK_ERROR_ACTION_STOP) {
261 scsi_req_retry(&r->req);
263 return true;
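/* Common error check used by the completion paths below: if the request was
 * cancelled, finish the cancellation; if the operation failed (ret < 0) or a
 * SCSI status has already been recorded by a passthrough command (r->status),
 * hand the error to scsi_handle_rw_error(). Returns true when the request has
 * been dealt with here and the caller must not touch it further. */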
266 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
268 if (r->req.io_canceled) {
269 scsi_req_cancel_complete(&r->req);
270 return true;
273 if (ret < 0 || (r->status && *r->status)) {
274 return scsi_handle_rw_error(r, -ret, acct_failed);
277 return false;
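/* Completion callback for simple asynchronous operations (flushes,
 * write-zeroes): runs with the backend's AioContext held, performs the common
 * error check and, on success, accounts the operation and completes the
 * request with GOOD status. */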
280 static void scsi_aio_complete(void *opaque, int ret)
282 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
283 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
285 assert(r->req.aiocb != NULL);
286 r->req.aiocb = NULL;
287 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
288 if (scsi_disk_req_check_error(r, ret, true)) {
289 goto done;
292 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
293 scsi_req_complete(&r->req, GOOD);
295 done:
296 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
297 scsi_req_unref(&r->req);
300 static bool scsi_is_cmd_fua(SCSICommand *cmd)
302 switch (cmd->buf[0]) {
303 case READ_10:
304 case READ_12:
305 case READ_16:
306 case WRITE_10:
307 case WRITE_12:
308 case WRITE_16:
309 return (cmd->buf[1] & 8) != 0;
311 case VERIFY_10:
312 case VERIFY_12:
313 case VERIFY_16:
314 case WRITE_VERIFY_10:
315 case WRITE_VERIFY_12:
316 case WRITE_VERIFY_16:
317 return true;
319 case READ_6:
320 case WRITE_6:
321 default:
322 return false;
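/* FUA (Force Unit Access) emulation: when need_fua_emulation is set on a
 * request (via the need_fua_emulation class hook, filled in when the command
 * is set up and not visible in this excerpt), writes are followed by an
 * explicit flush in scsi_write_do_fua(), and reads are preceded by a flush in
 * scsi_read_data() before the first read is submitted. */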
326 static void scsi_write_do_fua(SCSIDiskReq *r)
328 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
330 assert(r->req.aiocb == NULL);
331 assert(!r->req.io_canceled);
333 if (r->need_fua_emulation) {
334 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
335 BLOCK_ACCT_FLUSH);
336 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
337 return;
340 scsi_req_complete(&r->req, GOOD);
341 scsi_req_unref(&r->req);
344 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
346 assert(r->req.aiocb == NULL);
347 if (scsi_disk_req_check_error(r, ret, false)) {
348 goto done;
351 r->sector += r->sector_count;
352 r->sector_count = 0;
353 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
354 scsi_write_do_fua(r);
355 return;
356 } else {
357 scsi_req_complete(&r->req, GOOD);
360 done:
361 scsi_req_unref(&r->req);
364 static void scsi_dma_complete(void *opaque, int ret)
366 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
367 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
369 assert(r->req.aiocb != NULL);
370 r->req.aiocb = NULL;
372 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
373 if (ret < 0) {
374 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
375 } else {
376 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
378 scsi_dma_complete_noio(r, ret);
379 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
382 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
384 uint32_t n;
386 assert(r->req.aiocb == NULL);
387 if (scsi_disk_req_check_error(r, ret, false)) {
388 goto done;
391 n = r->qiov.size / BDRV_SECTOR_SIZE;
392 r->sector += n;
393 r->sector_count -= n;
394 scsi_req_data(&r->req, r->qiov.size);
396 done:
397 scsi_req_unref(&r->req);
400 static void scsi_read_complete(void *opaque, int ret)
402 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
403 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
405 assert(r->req.aiocb != NULL);
406 r->req.aiocb = NULL;
408 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
409 if (ret < 0) {
410 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
411 } else {
412 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
413 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
415 scsi_read_complete_noio(r, ret);
416 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
419 /* Actually issue a read to the block device. */
420 static void scsi_do_read(SCSIDiskReq *r, int ret)
422 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
423 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
425 assert (r->req.aiocb == NULL);
426 if (scsi_disk_req_check_error(r, ret, false)) {
427 goto done;
430 /* The request is used as the AIO opaque value, so add a ref. */
431 scsi_req_ref(&r->req);
433 if (r->req.sg) {
434 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
435 r->req.resid -= r->req.sg->size;
436 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
437 r->req.sg, r->sector << BDRV_SECTOR_BITS,
438 BDRV_SECTOR_SIZE,
439 sdc->dma_readv, r, scsi_dma_complete, r,
440 DMA_DIRECTION_FROM_DEVICE);
441 } else {
442 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
443 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
444 r->qiov.size, BLOCK_ACCT_READ);
445 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
446 scsi_read_complete, r, r);
449 done:
450 scsi_req_unref(&r->req);
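/* Callback for the flush that precedes a FUA read (see scsi_read_data):
 * account the flush result, then issue the actual read via scsi_do_read(). */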
453 static void scsi_do_read_cb(void *opaque, int ret)
455 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
456 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
458 assert (r->req.aiocb != NULL);
459 r->req.aiocb = NULL;
461 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
462 if (ret < 0) {
463 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
464 } else {
465 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
467 scsi_do_read(opaque, ret);
468 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
471 /* Read more data from scsi device into buffer. */
472 static void scsi_read_data(SCSIRequest *req)
474 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
475 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
476 bool first;
478 trace_scsi_disk_read_data_count(r->sector_count);
479 if (r->sector_count == 0) {
480 /* This also clears the sense buffer for REQUEST SENSE. */
481 scsi_req_complete(&r->req, GOOD);
482 return;
485 /* No data transfer may already be in progress */
486 assert(r->req.aiocb == NULL);
488 /* The request is used as the AIO opaque value, so add a ref. */
489 scsi_req_ref(&r->req);
490 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
491 trace_scsi_disk_read_data_invalid();
492 scsi_read_complete_noio(r, -EINVAL);
493 return;
496 if (!blk_is_available(req->dev->conf.blk)) {
497 scsi_read_complete_noio(r, -ENOMEDIUM);
498 return;
501 first = !r->started;
502 r->started = true;
503 if (first && r->need_fua_emulation) {
504 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
505 BLOCK_ACCT_FLUSH);
506 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
507 } else {
508 scsi_do_read(r, 0);
512 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
514 uint32_t n;
516 assert (r->req.aiocb == NULL);
517 if (scsi_disk_req_check_error(r, ret, false)) {
518 goto done;
521 n = r->qiov.size / BDRV_SECTOR_SIZE;
522 r->sector += n;
523 r->sector_count -= n;
524 if (r->sector_count == 0) {
525 scsi_write_do_fua(r);
526 return;
527 } else {
528 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
529 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
530 scsi_req_data(&r->req, r->qiov.size);
533 done:
534 scsi_req_unref(&r->req);
537 static void scsi_write_complete(void * opaque, int ret)
539 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
540 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
542 assert (r->req.aiocb != NULL);
543 r->req.aiocb = NULL;
545 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
546 if (ret < 0) {
547 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
548 } else {
549 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
551 scsi_write_complete_noio(r, ret);
552 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
555 static void scsi_write_data(SCSIRequest *req)
557 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
558 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
559 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
561 /* No data transfer may already be in progress */
562 assert(r->req.aiocb == NULL);
564 /* The request is used as the AIO opaque value, so add a ref. */
565 scsi_req_ref(&r->req);
566 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
567 trace_scsi_disk_write_data_invalid();
568 scsi_write_complete_noio(r, -EINVAL);
569 return;
572 if (!r->req.sg && !r->qiov.size) {
573 /* Called for the first time. Ask the driver to send us more data. */
574 r->started = true;
575 scsi_write_complete_noio(r, 0);
576 return;
578 if (!blk_is_available(req->dev->conf.blk)) {
579 scsi_write_complete_noio(r, -ENOMEDIUM);
580 return;
583 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
584 r->req.cmd.buf[0] == VERIFY_16) {
585 if (r->req.sg) {
586 scsi_dma_complete_noio(r, 0);
587 } else {
588 scsi_write_complete_noio(r, 0);
590 return;
593 if (r->req.sg) {
594 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
595 r->req.resid -= r->req.sg->size;
596 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
597 r->req.sg, r->sector << BDRV_SECTOR_BITS,
598 BDRV_SECTOR_SIZE,
599 sdc->dma_writev, r, scsi_dma_complete, r,
600 DMA_DIRECTION_TO_DEVICE);
601 } else {
602 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
603 r->qiov.size, BLOCK_ACCT_WRITE);
604 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
605 scsi_write_complete, r, r);
609 /* Return a pointer to the data buffer. */
610 static uint8_t *scsi_get_buf(SCSIRequest *req)
612 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
614 return (uint8_t *)r->iov.iov_base;
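/* Emulate the EVPD (Vital Product Data) INQUIRY pages: supported pages (0x00),
 * unit serial number (0x80), device identification (0x83), block limits (0xb0),
 * block device characteristics (0xb1) and thin provisioning (0xb2). Returns the
 * number of bytes placed in outbuf, or -1 for an unsupported page. */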
617 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
619 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
620 uint8_t page_code = req->cmd.buf[2];
621 int start, buflen = 0;
623 outbuf[buflen++] = s->qdev.type & 0x1f;
624 outbuf[buflen++] = page_code;
625 outbuf[buflen++] = 0x00;
626 outbuf[buflen++] = 0x00;
627 start = buflen;
629 switch (page_code) {
630 case 0x00: /* Supported page codes, mandatory */
632 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
633 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
634 if (s->serial) {
635 outbuf[buflen++] = 0x80; /* unit serial number */
637 outbuf[buflen++] = 0x83; /* device identification */
638 if (s->qdev.type == TYPE_DISK) {
639 outbuf[buflen++] = 0xb0; /* block limits */
640 outbuf[buflen++] = 0xb1; /* block device characteristics */
641 outbuf[buflen++] = 0xb2; /* thin provisioning */
643 break;
645 case 0x80: /* Device serial number, optional */
647 int l;
649 if (!s->serial) {
650 trace_scsi_disk_emulate_vpd_page_80_not_supported();
651 return -1;
654 l = strlen(s->serial);
655 if (l > 36) {
656 l = 36;
659 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
660 memcpy(outbuf + buflen, s->serial, l);
661 buflen += l;
662 break;
665 case 0x83: /* Device identification page, mandatory */
667 int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
669 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
671 if (id_len) {
672 outbuf[buflen++] = 0x2; /* ASCII */
673 outbuf[buflen++] = 0; /* not officially assigned */
674 outbuf[buflen++] = 0; /* reserved */
675 outbuf[buflen++] = id_len; /* length of data following */
676 memcpy(outbuf + buflen, s->device_id, id_len);
677 buflen += id_len;
680 if (s->qdev.wwn) {
681 outbuf[buflen++] = 0x1; /* Binary */
682 outbuf[buflen++] = 0x3; /* NAA */
683 outbuf[buflen++] = 0; /* reserved */
684 outbuf[buflen++] = 8;
685 stq_be_p(&outbuf[buflen], s->qdev.wwn);
686 buflen += 8;
689 if (s->qdev.port_wwn) {
690 outbuf[buflen++] = 0x61; /* SAS / Binary */
691 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
692 outbuf[buflen++] = 0; /* reserved */
693 outbuf[buflen++] = 8;
694 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
695 buflen += 8;
698 if (s->port_index) {
699 outbuf[buflen++] = 0x61; /* SAS / Binary */
701 /* PIV/Target port/relative target port */
702 outbuf[buflen++] = 0x94;
704 outbuf[buflen++] = 0; /* reserved */
705 outbuf[buflen++] = 4;
706 stw_be_p(&outbuf[buflen + 2], s->port_index);
707 buflen += 4;
709 break;
711 case 0xb0: /* block limits */
713 SCSIBlockLimits bl = {};
715 if (s->qdev.type == TYPE_ROM) {
716 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
717 return -1;
719 bl.wsnz = 1;
720 bl.unmap_sectors =
721 s->qdev.conf.discard_granularity / s->qdev.blocksize;
722 bl.min_io_size =
723 s->qdev.conf.min_io_size / s->qdev.blocksize;
724 bl.opt_io_size =
725 s->qdev.conf.opt_io_size / s->qdev.blocksize;
726 bl.max_unmap_sectors =
727 s->max_unmap_size / s->qdev.blocksize;
728 bl.max_io_sectors =
729 s->max_io_size / s->qdev.blocksize;
730 /* 255 descriptors fit in 4 KiB with an 8-byte header */
731 bl.max_unmap_descr = 255;
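/* i.e. 255 * 16-byte descriptors + 8-byte parameter list header = 4088 bytes */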
733 if (s->qdev.type == TYPE_DISK) {
734 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
735 int max_io_sectors_blk =
736 max_transfer_blk / s->qdev.blocksize;
738 bl.max_io_sectors =
739 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
741 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
742 break;
744 case 0xb1: /* block device characteristics */
746 buflen = 0x40;
747 outbuf[4] = (s->rotation_rate >> 8) & 0xff;
748 outbuf[5] = s->rotation_rate & 0xff;
749 outbuf[6] = 0; /* PRODUCT TYPE */
750 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
751 outbuf[8] = 0; /* VBULS */
752 break;
754 case 0xb2: /* thin provisioning */
756 buflen = 8;
757 outbuf[4] = 0;
758 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
759 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
760 outbuf[7] = 0;
761 break;
763 default:
764 return -1;
766 /* done with EVPD */
767 assert(buflen - start <= 255);
768 outbuf[start - 1] = buflen - start;
769 return buflen;
772 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
774 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
775 int buflen = 0;
777 if (req->cmd.buf[1] & 0x1) {
778 /* Vital product data */
779 return scsi_disk_emulate_vpd_page(req, outbuf);
782 /* Standard INQUIRY data */
783 if (req->cmd.buf[2] != 0) {
784 return -1;
787 /* PAGE CODE == 0 */
788 buflen = req->cmd.xfer;
789 if (buflen > SCSI_MAX_INQUIRY_LEN) {
790 buflen = SCSI_MAX_INQUIRY_LEN;
793 outbuf[0] = s->qdev.type & 0x1f;
794 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
796 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
797 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
799 memset(&outbuf[32], 0, 4);
800 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
802 * We claim conformance to SPC-3, which is required for guests
803 * to ask for modern features like READ CAPACITY(16) or the
804 * block characteristics VPD page by default. Not all of SPC-3
805 * is actually implemented, but we're good enough.
807 outbuf[2] = s->qdev.default_scsi_version;
808 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
810 if (buflen > 36) {
811 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
812 } else {
813 /* If the allocation length of CDB is too small,
814 the additional length is not adjusted */
815 outbuf[4] = 36 - 5;
818 /* Sync data transfer and TCQ. */
819 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
820 return buflen;
823 static inline bool media_is_dvd(SCSIDiskState *s)
825 uint64_t nb_sectors;
826 if (s->qdev.type != TYPE_ROM) {
827 return false;
829 if (!blk_is_available(s->qdev.conf.blk)) {
830 return false;
832 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
833 return nb_sectors > CD_MAX_SECTORS;
836 static inline bool media_is_cd(SCSIDiskState *s)
838 uint64_t nb_sectors;
839 if (s->qdev.type != TYPE_ROM) {
840 return false;
842 if (!blk_is_available(s->qdev.conf.blk)) {
843 return false;
845 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
846 return nb_sectors <= CD_MAX_SECTORS;
849 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
850 uint8_t *outbuf)
852 uint8_t type = r->req.cmd.buf[1] & 7;
854 if (s->qdev.type != TYPE_ROM) {
855 return -1;
858 /* Types 1/2 are only defined for Blu-Ray. */
859 if (type != 0) {
860 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
861 return -1;
864 memset(outbuf, 0, 34);
865 outbuf[1] = 32;
866 outbuf[2] = 0xe; /* last session complete, disc finalized */
867 outbuf[3] = 1; /* first track on disc */
868 outbuf[4] = 1; /* # of sessions */
869 outbuf[5] = 1; /* first track of last session */
870 outbuf[6] = 1; /* last track of last session */
871 outbuf[7] = 0x20; /* unrestricted use */
872 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
873 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
874 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
875 /* 24-31: disc bar code */
876 /* 32: disc application code */
877 /* 33: number of OPC tables */
879 return 34;
882 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
883 uint8_t *outbuf)
885 static const int rds_caps_size[5] = {
886 [0] = 2048 + 4,
887 [1] = 4 + 4,
888 [3] = 188 + 4,
889 [4] = 2048 + 4,
892 uint8_t media = r->req.cmd.buf[1];
893 uint8_t layer = r->req.cmd.buf[6];
894 uint8_t format = r->req.cmd.buf[7];
895 int size = -1;
897 if (s->qdev.type != TYPE_ROM) {
898 return -1;
900 if (media != 0) {
901 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
902 return -1;
905 if (format != 0xff) {
906 if (!blk_is_available(s->qdev.conf.blk)) {
907 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
908 return -1;
910 if (media_is_cd(s)) {
911 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
912 return -1;
914 if (format >= ARRAY_SIZE(rds_caps_size)) {
915 return -1;
917 size = rds_caps_size[format];
918 memset(outbuf, 0, size);
921 switch (format) {
922 case 0x00: {
923 /* Physical format information */
924 uint64_t nb_sectors;
925 if (layer != 0) {
926 goto fail;
928 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
930 outbuf[4] = 1; /* DVD-ROM, part version 1 */
931 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
932 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
933 outbuf[7] = 0; /* default densities */
935 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
936 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
937 break;
940 case 0x01: /* DVD copyright information, all zeros */
941 break;
943 case 0x03: /* BCA information - invalid field for no BCA info */
944 return -1;
946 case 0x04: /* DVD disc manufacturing information, all zeros */
947 break;
949 case 0xff: { /* List capabilities */
950 int i;
951 size = 4;
952 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
953 if (!rds_caps_size[i]) {
954 continue;
956 outbuf[size] = i;
957 outbuf[size + 1] = 0x40; /* Not writable, readable */
958 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
959 size += 4;
961 break;
964 default:
965 return -1;
968 /* Size of buffer, not including 2 byte size field */
969 stw_be_p(outbuf, size - 2);
970 return size;
972 fail:
973 return -1;
976 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
978 uint8_t event_code, media_status;
980 media_status = 0;
981 if (s->tray_open) {
982 media_status = MS_TRAY_OPEN;
983 } else if (blk_is_inserted(s->qdev.conf.blk)) {
984 media_status = MS_MEDIA_PRESENT;
987 /* Event notification descriptor */
988 event_code = MEC_NO_CHANGE;
989 if (media_status != MS_TRAY_OPEN) {
990 if (s->media_event) {
991 event_code = MEC_NEW_MEDIA;
992 s->media_event = false;
993 } else if (s->eject_request) {
994 event_code = MEC_EJECT_REQUESTED;
995 s->eject_request = false;
999 outbuf[0] = event_code;
1000 outbuf[1] = media_status;
1002 /* These fields are reserved, just clear them. */
1003 outbuf[2] = 0;
1004 outbuf[3] = 0;
1005 return 4;
1008 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
1009 uint8_t *outbuf)
1011 int size;
1012 uint8_t *buf = r->req.cmd.buf;
1013 uint8_t notification_class_request = buf[4];
1014 if (s->qdev.type != TYPE_ROM) {
1015 return -1;
1017 if ((buf[1] & 1) == 0) {
1018 /* asynchronous */
1019 return -1;
1022 size = 4;
1023 outbuf[0] = outbuf[1] = 0;
1024 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
1025 if (notification_class_request & (1 << GESN_MEDIA)) {
1026 outbuf[2] = GESN_MEDIA;
1027 size += scsi_event_status_media(s, &outbuf[size]);
1028 } else {
1029 outbuf[2] = 0x80;
1031 stw_be_p(outbuf, size - 4);
1032 return size;
1035 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1037 int current;
1039 if (s->qdev.type != TYPE_ROM) {
1040 return -1;
1043 if (media_is_dvd(s)) {
1044 current = MMC_PROFILE_DVD_ROM;
1045 } else if (media_is_cd(s)) {
1046 current = MMC_PROFILE_CD_ROM;
1047 } else {
1048 current = MMC_PROFILE_NONE;
1051 memset(outbuf, 0, 40);
1052 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1053 stw_be_p(&outbuf[6], current);
1054 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1055 outbuf[10] = 0x03; /* persistent, current */
1056 outbuf[11] = 8; /* two profiles */
1057 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1058 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1059 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1060 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1061 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1062 stw_be_p(&outbuf[20], 1);
1063 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1064 outbuf[23] = 8;
1065 stl_be_p(&outbuf[24], 1); /* SCSI */
1066 outbuf[28] = 1; /* DBE = 1, mandatory */
1067 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1068 stw_be_p(&outbuf[32], 3);
1069 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1070 outbuf[35] = 4;
1071 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1072 /* TODO: Random readable, CD read, DVD read, drive serial number,
1073 power management */
1074 return 40;
1077 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1079 if (s->qdev.type != TYPE_ROM) {
1080 return -1;
1082 memset(outbuf, 0, 8);
1083 outbuf[5] = 1; /* CD-ROM */
1084 return 8;
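/* Build a single mode page at *p_outbuf. page_control == 1 requests the
 * changeable-values mask (all zero here, since MODE SELECT changes are mostly
 * unsupported); otherwise current values are reported. The helper advances
 * *p_outbuf past the page and returns its total length (payload plus the
 * 2-byte page header), or -1 if the page does not apply to this device type. */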
1087 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1088 int page_control)
1090 static const int mode_sense_valid[0x3f] = {
1091 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1092 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1093 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1094 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1095 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1096 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1099 uint8_t *p = *p_outbuf + 2;
1100 int length;
1102 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1103 return -1;
1107 * If Changeable Values are requested, a mask denoting those mode parameters
1108 * that are changeable shall be returned. As we currently don't support
1109 * parameter changes via MODE_SELECT, all bits are returned set to zero.
1110 * The buffer was already memset to zero by the caller of this function.
1112 * The offsets here are off by two compared to the descriptions in the
1113 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1114 * but it is done so that offsets are consistent within our implementation
1115 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1116 * 2-byte and 4-byte headers.
1118 switch (page) {
1119 case MODE_PAGE_HD_GEOMETRY:
1120 length = 0x16;
1121 if (page_control == 1) { /* Changeable Values */
1122 break;
1124 /* if a geometry hint is available, use it */
1125 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1126 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1127 p[2] = s->qdev.conf.cyls & 0xff;
1128 p[3] = s->qdev.conf.heads & 0xff;
1129 /* Write precomp start cylinder, disabled */
1130 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1131 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1132 p[6] = s->qdev.conf.cyls & 0xff;
1133 /* Reduced current start cylinder, disabled */
1134 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1135 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1136 p[9] = s->qdev.conf.cyls & 0xff;
1137 /* Device step rate [ns], 200ns */
1138 p[10] = 0;
1139 p[11] = 200;
1140 /* Landing zone cylinder */
1141 p[12] = 0xff;
1142 p[13] = 0xff;
1143 p[14] = 0xff;
1144 /* Medium rotation rate [rpm], 5400 rpm */
1145 p[18] = (5400 >> 8) & 0xff;
1146 p[19] = 5400 & 0xff;
1147 break;
1149 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1150 length = 0x1e;
1151 if (page_control == 1) { /* Changeable Values */
1152 break;
1154 /* Transfer rate [kbit/s], 5Mbit/s */
1155 p[0] = 5000 >> 8;
1156 p[1] = 5000 & 0xff;
1157 /* if a geometry hint is available, use it */
1158 p[2] = s->qdev.conf.heads & 0xff;
1159 p[3] = s->qdev.conf.secs & 0xff;
1160 p[4] = s->qdev.blocksize >> 8;
1161 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1162 p[7] = s->qdev.conf.cyls & 0xff;
1163 /* Write precomp start cylinder, disabled */
1164 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1165 p[9] = s->qdev.conf.cyls & 0xff;
1166 /* Reduced current start cylinder, disabled */
1167 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1168 p[11] = s->qdev.conf.cyls & 0xff;
1169 /* Device step rate [100us], 100us */
1170 p[12] = 0;
1171 p[13] = 1;
1172 /* Device step pulse width [us], 1us */
1173 p[14] = 1;
1174 /* Device head settle delay [100us], 100us */
1175 p[15] = 0;
1176 p[16] = 1;
1177 /* Motor on delay [0.1s], 0.1s */
1178 p[17] = 1;
1179 /* Motor off delay [0.1s], 0.1s */
1180 p[18] = 1;
1181 /* Medium rotation rate [rpm], 5400 rpm */
1182 p[26] = (5400 >> 8) & 0xff;
1183 p[27] = 5400 & 0xff;
1184 break;
1186 case MODE_PAGE_CACHING:
1187 length = 0x12;
1188 if (page_control == 1 || /* Changeable Values */
1189 blk_enable_write_cache(s->qdev.conf.blk)) {
1190 p[0] = 4; /* WCE */
1192 break;
1194 case MODE_PAGE_R_W_ERROR:
1195 length = 10;
1196 if (page_control == 1) { /* Changeable Values */
1197 break;
1199 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1200 if (s->qdev.type == TYPE_ROM) {
1201 p[1] = 0x20; /* Read Retry Count */
1203 break;
1205 case MODE_PAGE_AUDIO_CTL:
1206 length = 14;
1207 break;
1209 case MODE_PAGE_CAPABILITIES:
1210 length = 0x14;
1211 if (page_control == 1) { /* Changeable Values */
1212 break;
1215 p[0] = 0x3b; /* CD-R & CD-RW read */
1216 p[1] = 0; /* Writing not supported */
1217 p[2] = 0x7f; /* Audio, composite, digital out,
1218 mode 2 form 1&2, multi session */
1219 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1220 RW corrected, C2 errors, ISRC,
1221 UPC, Bar code */
1222 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1223 /* Locking supported, jumper present, eject, tray */
1224 p[5] = 0; /* no volume & mute control, no
1225 changer */
1226 p[6] = (50 * 176) >> 8; /* 50x read speed */
1227 p[7] = (50 * 176) & 0xff;
1228 p[8] = 2 >> 8; /* Two volume levels */
1229 p[9] = 2 & 0xff;
1230 p[10] = 2048 >> 8; /* 2M buffer */
1231 p[11] = 2048 & 0xff;
1232 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1233 p[13] = (16 * 176) & 0xff;
1234 p[16] = (16 * 176) >> 8; /* 16x write speed */
1235 p[17] = (16 * 176) & 0xff;
1236 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1237 p[19] = (16 * 176) & 0xff;
1238 break;
1240 default:
1241 return -1;
1244 assert(length < 256);
1245 (*p_outbuf)[0] = page;
1246 (*p_outbuf)[1] = length;
1247 *p_outbuf += length + 2;
1248 return length + 2;
1251 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1253 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1254 uint64_t nb_sectors;
1255 bool dbd;
1256 int page, buflen, ret, page_control;
1257 uint8_t *p;
1258 uint8_t dev_specific_param;
1260 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1261 page = r->req.cmd.buf[2] & 0x3f;
1262 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1264 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1265 10, page, r->req.cmd.xfer, page_control);
1266 memset(outbuf, 0, r->req.cmd.xfer);
1267 p = outbuf;
1269 if (s->qdev.type == TYPE_DISK) {
1270 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1271 if (!blk_is_writable(s->qdev.conf.blk)) {
1272 dev_specific_param |= 0x80; /* Readonly. */
1274 } else {
1275 /* MMC prescribes that CD/DVD drives have no block descriptors,
1276 * and defines no device-specific parameter. */
1277 dev_specific_param = 0x00;
1278 dbd = true;
1281 if (r->req.cmd.buf[0] == MODE_SENSE) {
1282 p[1] = 0; /* Default media type. */
1283 p[2] = dev_specific_param;
1284 p[3] = 0; /* Block descriptor length. */
1285 p += 4;
1286 } else { /* MODE_SENSE_10 */
1287 p[2] = 0; /* Default media type. */
1288 p[3] = dev_specific_param;
1289 p[6] = p[7] = 0; /* Block descriptor length. */
1290 p += 8;
1293 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1294 if (!dbd && nb_sectors) {
1295 if (r->req.cmd.buf[0] == MODE_SENSE) {
1296 outbuf[3] = 8; /* Block descriptor length */
1297 } else { /* MODE_SENSE_10 */
1298 outbuf[7] = 8; /* Block descriptor length */
1300 nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1301 if (nb_sectors > 0xffffff) {
1302 nb_sectors = 0;
1304 p[0] = 0; /* media density code */
1305 p[1] = (nb_sectors >> 16) & 0xff;
1306 p[2] = (nb_sectors >> 8) & 0xff;
1307 p[3] = nb_sectors & 0xff;
1308 p[4] = 0; /* reserved */
1309 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1310 p[6] = s->qdev.blocksize >> 8;
1311 p[7] = 0;
1312 p += 8;
1315 if (page_control == 3) {
1316 /* Saved Values */
1317 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1318 return -1;
1321 if (page == 0x3f) {
1322 for (page = 0; page <= 0x3e; page++) {
1323 mode_sense_page(s, page, &p, page_control);
1325 } else {
1326 ret = mode_sense_page(s, page, &p, page_control);
1327 if (ret == -1) {
1328 return -1;
1332 buflen = p - outbuf;
1334 * The mode data length field specifies the length in bytes of the
1335 * following data that is available to be transferred. The mode data
1336 * length does not include itself.
1338 if (r->req.cmd.buf[0] == MODE_SENSE) {
1339 outbuf[0] = buflen - 1;
1340 } else { /* MODE_SENSE_10 */
1341 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1342 outbuf[1] = (buflen - 2) & 0xff;
1344 return buflen;
1347 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1349 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1350 int start_track, format, msf, toclen;
1351 uint64_t nb_sectors;
1353 msf = req->cmd.buf[1] & 2;
1354 format = req->cmd.buf[2] & 0xf;
1355 start_track = req->cmd.buf[6];
1356 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1357 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1358 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1359 switch (format) {
1360 case 0:
1361 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1362 break;
1363 case 1:
1364 /* multi session : only a single session defined */
1365 toclen = 12;
1366 memset(outbuf, 0, 12);
1367 outbuf[1] = 0x0a;
1368 outbuf[2] = 0x01;
1369 outbuf[3] = 0x01;
1370 break;
1371 case 2:
1372 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1373 break;
1374 default:
1375 return -1;
1377 return toclen;
1380 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1382 SCSIRequest *req = &r->req;
1383 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1384 bool start = req->cmd.buf[4] & 1;
1385 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1386 int pwrcnd = req->cmd.buf[4] & 0xf0;
1388 if (pwrcnd) {
1389 /* eject/load only happens for power condition == 0 */
1390 return 0;
1393 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1394 if (!start && !s->tray_open && s->tray_locked) {
1395 scsi_check_condition(r,
1396 blk_is_inserted(s->qdev.conf.blk)
1397 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1398 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1399 return -1;
1402 if (s->tray_open != !start) {
1403 blk_eject(s->qdev.conf.blk, !start);
1404 s->tray_open = !start;
1407 return 0;
1410 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1412 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1413 int buflen = r->iov.iov_len;
1415 if (buflen) {
1416 trace_scsi_disk_emulate_read_data(buflen);
1417 r->iov.iov_len = 0;
1418 r->started = true;
1419 scsi_req_data(&r->req, buflen);
1420 return;
1423 /* This also clears the sense buffer for REQUEST SENSE. */
1424 scsi_req_complete(&r->req, GOOD);
1427 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1428 uint8_t *inbuf, int inlen)
1430 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1431 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1432 uint8_t *p;
1433 int len, expected_len, changeable_len, i;
1435 /* The input buffer does not include the page header, so it is
1436 * off by 2 bytes.
1438 expected_len = inlen + 2;
1439 if (expected_len > SCSI_MAX_MODE_LEN) {
1440 return -1;
1443 p = mode_current;
1444 memset(mode_current, 0, inlen + 2);
1445 len = mode_sense_page(s, page, &p, 0);
1446 if (len < 0 || len != expected_len) {
1447 return -1;
1450 p = mode_changeable;
1451 memset(mode_changeable, 0, inlen + 2);
1452 changeable_len = mode_sense_page(s, page, &p, 1);
1453 assert(changeable_len == len);
1455 /* Check that unchangeable bits are the same as what MODE SENSE
1456 * would return.
1458 for (i = 2; i < len; i++) {
1459 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1460 return -1;
1463 return 0;
1466 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1468 switch (page) {
1469 case MODE_PAGE_CACHING:
1470 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1471 break;
1473 default:
1474 break;
1478 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1480 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1482 while (len > 0) {
1483 int page, subpage, page_len;
1485 /* Parse both possible formats for the mode page headers. */
1486 page = p[0] & 0x3f;
1487 if (p[0] & 0x40) {
1488 if (len < 4) {
1489 goto invalid_param_len;
1491 subpage = p[1];
1492 page_len = lduw_be_p(&p[2]);
1493 p += 4;
1494 len -= 4;
1495 } else {
1496 if (len < 2) {
1497 goto invalid_param_len;
1499 subpage = 0;
1500 page_len = p[1];
1501 p += 2;
1502 len -= 2;
1505 if (subpage) {
1506 goto invalid_param;
1508 if (page_len > len) {
1509 goto invalid_param_len;
1512 if (!change) {
1513 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1514 goto invalid_param;
1516 } else {
1517 scsi_disk_apply_mode_select(s, page, p);
1520 p += page_len;
1521 len -= page_len;
1523 return 0;
1525 invalid_param:
1526 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1527 return -1;
1529 invalid_param_len:
1530 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1531 return -1;
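/* MODE SELECT emulation (below) runs mode_select_pages() in two passes over
 * the page list: pass 0 only validates that unchangeable bits match what MODE
 * SENSE reports, pass 1 actually applies the changes, so a rejected page
 * leaves the device state untouched. */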
1534 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1536 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1537 uint8_t *p = inbuf;
1538 int cmd = r->req.cmd.buf[0];
1539 int len = r->req.cmd.xfer;
1540 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1541 int bd_len;
1542 int pass;
1544 /* We only support PF=1, SP=0. */
1545 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1546 goto invalid_field;
1549 if (len < hdr_len) {
1550 goto invalid_param_len;
1553 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1554 len -= hdr_len;
1555 p += hdr_len;
1556 if (len < bd_len) {
1557 goto invalid_param_len;
1559 if (bd_len != 0 && bd_len != 8) {
1560 goto invalid_param;
1563 len -= bd_len;
1564 p += bd_len;
1566 /* Ensure no change is made if there is an error! */
1567 for (pass = 0; pass < 2; pass++) {
1568 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1569 assert(pass == 0);
1570 return;
1573 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1574 /* The request is used as the AIO opaque value, so add a ref. */
1575 scsi_req_ref(&r->req);
1576 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1577 BLOCK_ACCT_FLUSH);
1578 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1579 return;
1582 scsi_req_complete(&r->req, GOOD);
1583 return;
1585 invalid_param:
1586 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1587 return;
1589 invalid_param_len:
1590 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1591 return;
1593 invalid_field:
1594 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1597 static inline bool check_lba_range(SCSIDiskState *s,
1598 uint64_t sector_num, uint32_t nb_sectors)
1601 * The first line tests that no overflow happens when computing the last
1602 * sector. The second line tests that the last accessed sector is in
1603 * range.
1605 * Careful, the computations should not underflow for nb_sectors == 0,
1606 * and a 0-block read to the first LBA beyond the end of device is
1607 * valid.
1609 return (sector_num <= sector_num + nb_sectors &&
1610 sector_num + nb_sectors <= s->qdev.max_lba + 1);
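/* Example: with max_lba == 999 (a 1000-sector device), sector_num = 990 and
 * nb_sectors = 10 is accepted (990 + 10 <= 1000), while sector_num = 991 and
 * nb_sectors = 10 is rejected; a wrapping sector_num + nb_sectors is caught by
 * the first comparison. */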
1613 typedef struct UnmapCBData {
1614 SCSIDiskReq *r;
1615 uint8_t *inbuf;
1616 int count;
1617 } UnmapCBData;
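/* UNMAP block descriptors are processed one at a time: scsi_unmap_complete_noio()
 * validates the next 16-byte descriptor and submits a pdiscard for it, and
 * scsi_unmap_complete() loops back into it until data->count reaches zero, at
 * which point the request completes with GOOD status. */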
1619 static void scsi_unmap_complete(void *opaque, int ret);
1621 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1623 SCSIDiskReq *r = data->r;
1624 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1626 assert(r->req.aiocb == NULL);
1628 if (data->count > 0) {
1629 r->sector = ldq_be_p(&data->inbuf[0])
1630 * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1631 r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL)
1632 * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1633 if (!check_lba_range(s, r->sector, r->sector_count)) {
1634 block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
1635 BLOCK_ACCT_UNMAP);
1636 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1637 goto done;
1640 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1641 r->sector_count * BDRV_SECTOR_SIZE,
1642 BLOCK_ACCT_UNMAP);
1644 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1645 r->sector * BDRV_SECTOR_SIZE,
1646 r->sector_count * BDRV_SECTOR_SIZE,
1647 scsi_unmap_complete, data);
1648 data->count--;
1649 data->inbuf += 16;
1650 return;
1653 scsi_req_complete(&r->req, GOOD);
1655 done:
1656 scsi_req_unref(&r->req);
1657 g_free(data);
1660 static void scsi_unmap_complete(void *opaque, int ret)
1662 UnmapCBData *data = opaque;
1663 SCSIDiskReq *r = data->r;
1664 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1666 assert(r->req.aiocb != NULL);
1667 r->req.aiocb = NULL;
1669 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1670 if (scsi_disk_req_check_error(r, ret, true)) {
1671 scsi_req_unref(&r->req);
1672 g_free(data);
1673 } else {
1674 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1675 scsi_unmap_complete_noio(data, ret);
1677 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1680 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1682 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1683 uint8_t *p = inbuf;
1684 int len = r->req.cmd.xfer;
1685 UnmapCBData *data;
1687 /* Reject ANCHOR=1. */
1688 if (r->req.cmd.buf[1] & 0x1) {
1689 goto invalid_field;
1692 if (len < 8) {
1693 goto invalid_param_len;
1695 if (len < lduw_be_p(&p[0]) + 2) {
1696 goto invalid_param_len;
1698 if (len < lduw_be_p(&p[2]) + 8) {
1699 goto invalid_param_len;
1701 if (lduw_be_p(&p[2]) & 15) {
1702 goto invalid_param_len;
1705 if (!blk_is_writable(s->qdev.conf.blk)) {
1706 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1707 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1708 return;
1711 data = g_new0(UnmapCBData, 1);
1712 data->r = r;
1713 data->inbuf = &p[8];
1714 data->count = lduw_be_p(&p[2]) >> 4;
1716 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1717 scsi_req_ref(&r->req);
1718 scsi_unmap_complete_noio(data, 0);
1719 return;
1721 invalid_param_len:
1722 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1723 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1724 return;
1726 invalid_field:
1727 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1728 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1731 typedef struct WriteSameCBData {
1732 SCSIDiskReq *r;
1733 int64_t sector;
1734 int nb_sectors;
1735 QEMUIOVector qiov;
1736 struct iovec iov;
1737 } WriteSameCBData;
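/* WRITE SAME emulation: a payload of all zeroes (or the UNMAP bit) is turned
 * into blk_aio_pwrite_zeroes; any other pattern is replicated into a bounce
 * buffer of at most SCSI_WRITE_SAME_MAX bytes and written out chunk by chunk
 * by scsi_write_same_complete(). */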
1739 static void scsi_write_same_complete(void *opaque, int ret)
1741 WriteSameCBData *data = opaque;
1742 SCSIDiskReq *r = data->r;
1743 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1745 assert(r->req.aiocb != NULL);
1746 r->req.aiocb = NULL;
1747 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1748 if (scsi_disk_req_check_error(r, ret, true)) {
1749 goto done;
1752 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1754 data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
1755 data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
1756 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1757 data->iov.iov_len);
1758 if (data->iov.iov_len) {
1759 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1760 data->iov.iov_len, BLOCK_ACCT_WRITE);
1761 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1762 * where final qiov may need smaller size */
1763 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1764 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1765 data->sector << BDRV_SECTOR_BITS,
1766 &data->qiov, 0,
1767 scsi_write_same_complete, data);
1768 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1769 return;
1772 scsi_req_complete(&r->req, GOOD);
1774 done:
1775 scsi_req_unref(&r->req);
1776 qemu_vfree(data->iov.iov_base);
1777 g_free(data);
1778 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1781 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1783 SCSIRequest *req = &r->req;
1784 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1785 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1786 WriteSameCBData *data;
1787 uint8_t *buf;
1788 int i;
1790 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1791 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1792 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1793 return;
1796 if (!blk_is_writable(s->qdev.conf.blk)) {
1797 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1798 return;
1800 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1801 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1802 return;
1805 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1806 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1808 /* The request is used as the AIO opaque value, so add a ref. */
1809 scsi_req_ref(&r->req);
1810 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1811 nb_sectors * s->qdev.blocksize,
1812 BLOCK_ACCT_WRITE);
1813 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1814 r->req.cmd.lba * s->qdev.blocksize,
1815 nb_sectors * s->qdev.blocksize,
1816 flags, scsi_aio_complete, r);
1817 return;
1820 data = g_new0(WriteSameCBData, 1);
1821 data->r = r;
1822 data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1823 data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1824 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1825 SCSI_WRITE_SAME_MAX);
1826 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1827 data->iov.iov_len);
1828 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1830 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1831 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1834 scsi_req_ref(&r->req);
1835 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1836 data->iov.iov_len, BLOCK_ACCT_WRITE);
1837 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1838 data->sector << BDRV_SECTOR_BITS,
1839 &data->qiov, 0,
1840 scsi_write_same_complete, data);
1843 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1845 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1847 if (r->iov.iov_len) {
1848 int buflen = r->iov.iov_len;
1849 trace_scsi_disk_emulate_write_data(buflen);
1850 r->iov.iov_len = 0;
1851 scsi_req_data(&r->req, buflen);
1852 return;
1855 switch (req->cmd.buf[0]) {
1856 case MODE_SELECT:
1857 case MODE_SELECT_10:
1858 /* This also clears the sense buffer for REQUEST SENSE. */
1859 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1860 break;
1862 case UNMAP:
1863 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1864 break;
1866 case VERIFY_10:
1867 case VERIFY_12:
1868 case VERIFY_16:
1869 if (r->req.status == -1) {
1870 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1872 break;
1874 case WRITE_SAME_10:
1875 case WRITE_SAME_16:
1876 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1877 break;
1879 default:
1880 abort();
1884 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1886 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1887 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1888 uint64_t nb_sectors;
1889 uint8_t *outbuf;
1890 int buflen;
1892 switch (req->cmd.buf[0]) {
1893 case INQUIRY:
1894 case MODE_SENSE:
1895 case MODE_SENSE_10:
1896 case RESERVE:
1897 case RESERVE_10:
1898 case RELEASE:
1899 case RELEASE_10:
1900 case START_STOP:
1901 case ALLOW_MEDIUM_REMOVAL:
1902 case GET_CONFIGURATION:
1903 case GET_EVENT_STATUS_NOTIFICATION:
1904 case MECHANISM_STATUS:
1905 case REQUEST_SENSE:
1906 break;
1908 default:
1909 if (!blk_is_available(s->qdev.conf.blk)) {
1910 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1911 return 0;
1913 break;
1917 * FIXME: we shouldn't return anything bigger than 4k, but the code
1918 * requires the buffer to be as big as req->cmd.xfer in several
1919 * places. So, do not allow CDBs with a very large ALLOCATION
1920 * LENGTH. The real fix would be to modify scsi_read_data and
1921 * dma_buf_read, so that they return data beyond the buflen
1922 * as all zeros.
1924 if (req->cmd.xfer > 65536) {
1925 goto illegal_request;
1927 r->buflen = MAX(4096, req->cmd.xfer);
1929 if (!r->iov.iov_base) {
1930 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1933 outbuf = r->iov.iov_base;
1934 memset(outbuf, 0, r->buflen);
1935 switch (req->cmd.buf[0]) {
1936 case TEST_UNIT_READY:
1937 assert(blk_is_available(s->qdev.conf.blk));
1938 break;
1939 case INQUIRY:
1940 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1941 if (buflen < 0) {
1942 goto illegal_request;
1944 break;
1945 case MODE_SENSE:
1946 case MODE_SENSE_10:
1947 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1948 if (buflen < 0) {
1949 goto illegal_request;
1951 break;
1952 case READ_TOC:
1953 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1954 if (buflen < 0) {
1955 goto illegal_request;
1957 break;
1958 case RESERVE:
1959 if (req->cmd.buf[1] & 1) {
1960 goto illegal_request;
1962 break;
1963 case RESERVE_10:
1964 if (req->cmd.buf[1] & 3) {
1965 goto illegal_request;
1967 break;
1968 case RELEASE:
1969 if (req->cmd.buf[1] & 1) {
1970 goto illegal_request;
1972 break;
1973 case RELEASE_10:
1974 if (req->cmd.buf[1] & 3) {
1975 goto illegal_request;
1977 break;
1978 case START_STOP:
1979 if (scsi_disk_emulate_start_stop(r) < 0) {
1980 return 0;
1982 break;
1983 case ALLOW_MEDIUM_REMOVAL:
1984 s->tray_locked = req->cmd.buf[4] & 1;
1985 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1986 break;
1987 case READ_CAPACITY_10:
1988 /* The normal LEN field for this command is zero. */
1989 memset(outbuf, 0, 8);
1990 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1991 if (!nb_sectors) {
1992 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1993 return 0;
1995 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1996 goto illegal_request;
1998 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1999 /* Returned value is the address of the last sector. */
2000 nb_sectors--;
2001 /* Remember the new size for read/write sanity checking. */
2002 s->qdev.max_lba = nb_sectors;
2003 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
2004 if (nb_sectors > UINT32_MAX) {
2005 nb_sectors = UINT32_MAX;
2007 outbuf[0] = (nb_sectors >> 24) & 0xff;
2008 outbuf[1] = (nb_sectors >> 16) & 0xff;
2009 outbuf[2] = (nb_sectors >> 8) & 0xff;
2010 outbuf[3] = nb_sectors & 0xff;
2011 outbuf[4] = 0;
2012 outbuf[5] = 0;
2013 outbuf[6] = s->qdev.blocksize >> 8;
2014 outbuf[7] = 0;
2015 break;
2016 case REQUEST_SENSE:
2017 /* Just return "NO SENSE". */
2018 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
2019 (req->cmd.buf[1] & 1) == 0);
2020 if (buflen < 0) {
2021 goto illegal_request;
2023 break;
2024 case MECHANISM_STATUS:
2025 buflen = scsi_emulate_mechanism_status(s, outbuf);
2026 if (buflen < 0) {
2027 goto illegal_request;
2029 break;
2030 case GET_CONFIGURATION:
2031 buflen = scsi_get_configuration(s, outbuf);
2032 if (buflen < 0) {
2033 goto illegal_request;
2035 break;
2036 case GET_EVENT_STATUS_NOTIFICATION:
2037 buflen = scsi_get_event_status_notification(s, r, outbuf);
2038 if (buflen < 0) {
2039 goto illegal_request;
2041 break;
2042 case READ_DISC_INFORMATION:
2043 buflen = scsi_read_disc_information(s, r, outbuf);
2044 if (buflen < 0) {
2045 goto illegal_request;
2047 break;
2048 case READ_DVD_STRUCTURE:
2049 buflen = scsi_read_dvd_structure(s, r, outbuf);
2050 if (buflen < 0) {
2051 goto illegal_request;
2053 break;
2054 case SERVICE_ACTION_IN_16:
2055 /* Service Action In subcommands. */
2056 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2057 trace_scsi_disk_emulate_command_SAI_16();
2058 memset(outbuf, 0, req->cmd.xfer);
2059 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2060 if (!nb_sectors) {
2061 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2062 return 0;
2064 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2065 goto illegal_request;
2067 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2068 /* Returned value is the address of the last sector. */
2069 nb_sectors--;
2070 /* Remember the new size for read/write sanity checking. */
2071 s->qdev.max_lba = nb_sectors;
2072 outbuf[0] = (nb_sectors >> 56) & 0xff;
2073 outbuf[1] = (nb_sectors >> 48) & 0xff;
2074 outbuf[2] = (nb_sectors >> 40) & 0xff;
2075 outbuf[3] = (nb_sectors >> 32) & 0xff;
2076 outbuf[4] = (nb_sectors >> 24) & 0xff;
2077 outbuf[5] = (nb_sectors >> 16) & 0xff;
2078 outbuf[6] = (nb_sectors >> 8) & 0xff;
2079 outbuf[7] = nb_sectors & 0xff;
2080 outbuf[8] = 0;
2081 outbuf[9] = 0;
2082 outbuf[10] = s->qdev.blocksize >> 8;
2083 outbuf[11] = 0;
2084 outbuf[12] = 0;
2085 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2087 /* set TPE bit if the format supports discard */
2088 if (s->qdev.conf.discard_granularity) {
2089 outbuf[14] = 0x80;
2092 /* Protection, exponent and lowest lba field left blank. */
2093 break;
2095 trace_scsi_disk_emulate_command_SAI_unsupported();
2096 goto illegal_request;
2097 case SYNCHRONIZE_CACHE:
2098 /* The request is used as the AIO opaque value, so add a ref. */
2099 scsi_req_ref(&r->req);
2100 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2101 BLOCK_ACCT_FLUSH);
2102 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2103 return 0;
2104 case SEEK_10:
2105 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2106 if (r->req.cmd.lba > s->qdev.max_lba) {
2107 goto illegal_lba;
2109 break;
2110 case MODE_SELECT:
2111 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2112 break;
2113 case MODE_SELECT_10:
2114 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2115 break;
2116 case UNMAP:
2117 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2118 break;
2119 case VERIFY_10:
2120 case VERIFY_12:
2121 case VERIFY_16:
2122 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2123 if (req->cmd.buf[1] & 6) {
2124 goto illegal_request;
2126 break;
2127 case WRITE_SAME_10:
2128 case WRITE_SAME_16:
2129 trace_scsi_disk_emulate_command_WRITE_SAME(
2130 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2131 break;
2132 default:
2133 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2134 scsi_command_name(buf[0]));
2135 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2136 return 0;
2138 assert(!r->req.aiocb);
2139 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2140 if (r->iov.iov_len == 0) {
2141 scsi_req_complete(&r->req, GOOD);
2143 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2144 assert(r->iov.iov_len == req->cmd.xfer);
2145 return -r->iov.iov_len;
2146 } else {
2147 return r->iov.iov_len;
2150 illegal_request:
2151 if (r->req.status == -1) {
2152 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2154 return 0;
2156 illegal_lba:
2157 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2158 return 0;
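/*
 * Editor's illustrative sketch (not part of the driver), assuming SBC-style
 * READ CAPACITY (10) semantics as used in the READ CAPACITY (10) case above:
 * the response carries the last LBA as a big-endian 32-bit value in bytes 0-3
 * and the block length in bytes 4-7; capacities beyond what 32 bits can
 * address are clipped to UINT32_MAX so the guest retries with
 * READ CAPACITY (16).  The helper name is hypothetical.
 */
static inline void example_encode_read_capacity_10(uint8_t *outbuf,
                                                   uint64_t last_lba,
                                                   uint32_t blocksize)
{
    uint32_t lba32 = last_lba > UINT32_MAX ? UINT32_MAX : (uint32_t)last_lba;

    outbuf[0] = (lba32 >> 24) & 0xff;     /* returned logical block address */
    outbuf[1] = (lba32 >> 16) & 0xff;
    outbuf[2] = (lba32 >> 8) & 0xff;
    outbuf[3] = lba32 & 0xff;
    outbuf[4] = (blocksize >> 24) & 0xff; /* logical block length in bytes */
    outbuf[5] = (blocksize >> 16) & 0xff;
    outbuf[6] = (blocksize >> 8) & 0xff;
    outbuf[7] = blocksize & 0xff;
}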
2161 /* Execute a SCSI command.  Returns the length of the data expected by the
2162 command.  This will be positive for data transfers from the device
2163 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2164 and zero if the command does not transfer any data. */
2166 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2168 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2169 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2170 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2171 uint32_t len;
2172 uint8_t command;
2174 command = buf[0];
2176 if (!blk_is_available(s->qdev.conf.blk)) {
2177 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2178 return 0;
2181 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2182 switch (command) {
2183 case READ_6:
2184 case READ_10:
2185 case READ_12:
2186 case READ_16:
2187 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2188 /* Protection information is not supported. For SCSI versions 2 and
2189 * older (as determined by snooping the guest's INQUIRY commands),
2190 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2191 */
2192 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2193 goto illegal_request;
2195 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2196 goto illegal_lba;
2198 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2199 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2200 break;
2201 case WRITE_6:
2202 case WRITE_10:
2203 case WRITE_12:
2204 case WRITE_16:
2205 case WRITE_VERIFY_10:
2206 case WRITE_VERIFY_12:
2207 case WRITE_VERIFY_16:
2208 if (!blk_is_writable(s->qdev.conf.blk)) {
2209 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2210 return 0;
2212 trace_scsi_disk_dma_command_WRITE(
2213 (command & 0xe) == 0xe ? "And Verify " : "",
2214 r->req.cmd.lba, len);
2215 /* fall through */
2216 case VERIFY_10:
2217 case VERIFY_12:
2218 case VERIFY_16:
2219 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2220 * As far as DMA is concerned, we can treat it the same as a write;
2221 * scsi_block_do_sgio will send VERIFY commands.
2222 */
2223 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2224 goto illegal_request;
2226 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2227 goto illegal_lba;
2229 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2230 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2231 break;
2232 default:
2233 abort();
2234 illegal_request:
2235 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2236 return 0;
2237 illegal_lba:
2238 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2239 return 0;
2241 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2242 if (r->sector_count == 0) {
2243 scsi_req_complete(&r->req, GOOD);
2245 assert(r->iov.iov_len == 0);
2246 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2247 return -r->sector_count * BDRV_SECTOR_SIZE;
2248 } else {
2249 return r->sector_count * BDRV_SECTOR_SIZE;
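/*
 * Editor's worked example (values assumed for illustration): a READ(10) of
 * 8 blocks on a 512-byte-block disk yields len == 8 and sector_count == 8,
 * so the function returns +4096 (device-to-host transfer); the same transfer
 * issued as WRITE(10) returns -4096 because req->cmd.mode is
 * SCSI_XFER_TO_DEV.
 */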
2253 static void scsi_disk_reset(DeviceState *dev)
2255 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2256 uint64_t nb_sectors;
2258 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2260 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2261 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2262 if (nb_sectors) {
2263 nb_sectors--;
2265 s->qdev.max_lba = nb_sectors;
2266 /* reset tray statuses */
2267 s->tray_locked = 0;
2268 s->tray_open = 0;
2270 s->qdev.scsi_version = s->qdev.default_scsi_version;
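/*
 * Editor's worked example (values assumed for illustration): with a 1 GiB
 * backing image and 4096-byte logical blocks, blk_get_geometry() reports
 * 2097152 512-byte sectors; dividing by blocksize/BDRV_SECTOR_SIZE (8) gives
 * 262144 logical blocks, so max_lba is set to 262143, the address of the
 * last logical block.
 */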
2273 static void scsi_disk_resize_cb(void *opaque)
2275 SCSIDiskState *s = opaque;
2277 /* SPC lists this sense code as available only for
2278 * direct-access devices.
2279 */
2280 if (s->qdev.type == TYPE_DISK) {
2281 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2285 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2287 SCSIDiskState *s = opaque;
2289 /*
2290 * When a CD gets changed, we have to report an ejected state and
2291 * then a loaded state to guests so that they detect tray
2292 * open/close and media change events. Guests that do not use
2293 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2294 * states rely on this behavior.
2296 * media_changed governs the state machine used for unit attention
2297 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2298 */
2299 s->media_changed = load;
2300 s->tray_open = !load;
2301 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2302 s->media_event = true;
2303 s->eject_request = false;
2306 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2308 SCSIDiskState *s = opaque;
2310 s->eject_request = true;
2311 if (force) {
2312 s->tray_locked = false;
2316 static bool scsi_cd_is_tray_open(void *opaque)
2318 return ((SCSIDiskState *)opaque)->tray_open;
2321 static bool scsi_cd_is_medium_locked(void *opaque)
2323 return ((SCSIDiskState *)opaque)->tray_locked;
2326 static const BlockDevOps scsi_disk_removable_block_ops = {
2327 .change_media_cb = scsi_cd_change_media_cb,
2328 .eject_request_cb = scsi_cd_eject_request_cb,
2329 .is_tray_open = scsi_cd_is_tray_open,
2330 .is_medium_locked = scsi_cd_is_medium_locked,
2332 .resize_cb = scsi_disk_resize_cb,
2335 static const BlockDevOps scsi_disk_block_ops = {
2336 .resize_cb = scsi_disk_resize_cb,
2339 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2341 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2342 if (s->media_changed) {
2343 s->media_changed = false;
2344 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2348 static void scsi_realize(SCSIDevice *dev, Error **errp)
2350 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2351 bool read_only;
2353 if (!s->qdev.conf.blk) {
2354 error_setg(errp, "drive property not set");
2355 return;
2358 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2359 !blk_is_inserted(s->qdev.conf.blk)) {
2360 error_setg(errp, "Device needs media, but drive is empty");
2361 return;
2364 if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
2365 return;
2368 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
2369 !s->qdev.hba_supports_iothread)
2371 error_setg(errp, "HBA does not support iothreads");
2372 return;
2375 if (dev->type == TYPE_DISK) {
2376 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
2377 return;
2381 read_only = !blk_supports_write_perm(s->qdev.conf.blk);
2382 if (dev->type == TYPE_ROM) {
2383 read_only = true;
2386 if (!blkconf_apply_backend_options(&dev->conf, read_only,
2387 dev->type == TYPE_DISK, errp)) {
2388 return;
2391 if (s->qdev.conf.discard_granularity == -1) {
2392 s->qdev.conf.discard_granularity =
2393 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2396 if (!s->version) {
2397 s->version = g_strdup(qemu_hw_version());
2399 if (!s->vendor) {
2400 s->vendor = g_strdup("QEMU");
2402 if (!s->device_id) {
2403 if (s->serial) {
2404 s->device_id = g_strdup_printf("%.20s", s->serial);
2405 } else {
2406 const char *str = blk_name(s->qdev.conf.blk);
2407 if (str && *str) {
2408 s->device_id = g_strdup(str);
2413 if (blk_is_sg(s->qdev.conf.blk)) {
2414 error_setg(errp, "unwanted /dev/sg*");
2415 return;
2418 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2419 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2420 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2421 } else {
2422 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2424 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2426 blk_iostatus_enable(s->qdev.conf.blk);
2428 add_boot_device_lchs(&dev->qdev, NULL,
2429 dev->conf.lcyls,
2430 dev->conf.lheads,
2431 dev->conf.lsecs);
2434 static void scsi_unrealize(SCSIDevice *dev)
2436 del_boot_device_lchs(&dev->qdev, NULL);
2439 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2441 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2442 AioContext *ctx = NULL;
2443 /* This can happen for devices without a drive.  The error message for the
2444 * missing backend will be issued in scsi_realize().
2445 */
2446 if (s->qdev.conf.blk) {
2447 ctx = blk_get_aio_context(s->qdev.conf.blk);
2448 aio_context_acquire(ctx);
2449 if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
2450 goto out;
2453 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2454 s->qdev.type = TYPE_DISK;
2455 if (!s->product) {
2456 s->product = g_strdup("QEMU HARDDISK");
2458 scsi_realize(&s->qdev, errp);
2459 out:
2460 if (ctx) {
2461 aio_context_release(ctx);
2465 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2467 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2468 AioContext *ctx;
2469 int ret;
2471 if (!dev->conf.blk) {
2472 /* Anonymous BlockBackend for an empty drive. As we put it into
2473 * dev->conf, qdev takes care of detaching on unplug. */
2474 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
2475 ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
2476 assert(ret == 0);
2479 ctx = blk_get_aio_context(dev->conf.blk);
2480 aio_context_acquire(ctx);
2481 s->qdev.blocksize = 2048;
2482 s->qdev.type = TYPE_ROM;
2483 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2484 if (!s->product) {
2485 s->product = g_strdup("QEMU CD-ROM");
2487 scsi_realize(&s->qdev, errp);
2488 aio_context_release(ctx);
2491 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2493 DriveInfo *dinfo;
2494 Error *local_err = NULL;
2496 warn_report("'scsi-disk' is deprecated, "
2497 "please use 'scsi-hd' or 'scsi-cd' instead");
2499 if (!dev->conf.blk) {
2500 scsi_realize(dev, &local_err);
2501 assert(local_err);
2502 error_propagate(errp, local_err);
2503 return;
2506 dinfo = blk_legacy_dinfo(dev->conf.blk);
2507 if (dinfo && dinfo->media_cd) {
2508 scsi_cd_realize(dev, errp);
2509 } else {
2510 scsi_hd_realize(dev, errp);
2514 static const SCSIReqOps scsi_disk_emulate_reqops = {
2515 .size = sizeof(SCSIDiskReq),
2516 .free_req = scsi_free_request,
2517 .send_command = scsi_disk_emulate_command,
2518 .read_data = scsi_disk_emulate_read_data,
2519 .write_data = scsi_disk_emulate_write_data,
2520 .get_buf = scsi_get_buf,
2523 static const SCSIReqOps scsi_disk_dma_reqops = {
2524 .size = sizeof(SCSIDiskReq),
2525 .free_req = scsi_free_request,
2526 .send_command = scsi_disk_dma_command,
2527 .read_data = scsi_read_data,
2528 .write_data = scsi_write_data,
2529 .get_buf = scsi_get_buf,
2530 .load_request = scsi_disk_load_request,
2531 .save_request = scsi_disk_save_request,
2534 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2535 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2536 [INQUIRY] = &scsi_disk_emulate_reqops,
2537 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2538 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2539 [START_STOP] = &scsi_disk_emulate_reqops,
2540 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2541 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2542 [READ_TOC] = &scsi_disk_emulate_reqops,
2543 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2544 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2545 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2546 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2547 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2548 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2549 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2550 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2551 [SEEK_10] = &scsi_disk_emulate_reqops,
2552 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2553 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2554 [UNMAP] = &scsi_disk_emulate_reqops,
2555 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2556 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2557 [VERIFY_10] = &scsi_disk_emulate_reqops,
2558 [VERIFY_12] = &scsi_disk_emulate_reqops,
2559 [VERIFY_16] = &scsi_disk_emulate_reqops,
2561 [READ_6] = &scsi_disk_dma_reqops,
2562 [READ_10] = &scsi_disk_dma_reqops,
2563 [READ_12] = &scsi_disk_dma_reqops,
2564 [READ_16] = &scsi_disk_dma_reqops,
2565 [WRITE_6] = &scsi_disk_dma_reqops,
2566 [WRITE_10] = &scsi_disk_dma_reqops,
2567 [WRITE_12] = &scsi_disk_dma_reqops,
2568 [WRITE_16] = &scsi_disk_dma_reqops,
2569 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2570 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2571 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
2574 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
2576 int i;
2577 int len = scsi_cdb_length(buf);
2578 char *line_buffer, *p;
2580 line_buffer = g_malloc(len * 5 + 1);
2582 for (i = 0, p = line_buffer; i < len; i++) {
2583 p += sprintf(p, " 0x%02x", buf[i]);
2585 trace_scsi_disk_new_request(lun, tag, line_buffer);
2587 g_free(line_buffer);
2590 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2591 uint8_t *buf, void *hba_private)
2593 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2594 SCSIRequest *req;
2595 const SCSIReqOps *ops;
2596 uint8_t command;
2598 command = buf[0];
2599 ops = scsi_disk_reqops_dispatch[command];
2600 if (!ops) {
2601 ops = &scsi_disk_emulate_reqops;
2603 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2605 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2606 scsi_disk_new_request_dump(lun, tag, buf);
2609 return req;
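/*
 * Editor's illustrative sketch (not part of the driver): the dispatch policy
 * used by scsi_new_request() above.  Opcodes present in
 * scsi_disk_reqops_dispatch[] get their dedicated reqops (emulation for e.g.
 * INQUIRY, DMA for e.g. READ(10)); any opcode without an entry falls back to
 * scsi_disk_emulate_reqops, whose command handler reports INVALID OPCODE for
 * commands it does not implement.
 */
static inline const SCSIReqOps *example_pick_reqops(uint8_t opcode)
{
    const SCSIReqOps *ops = scsi_disk_reqops_dispatch[opcode];

    return ops ? ops : &scsi_disk_emulate_reqops;
}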
2612 #ifdef __linux__
2613 static int get_device_type(SCSIDiskState *s)
2615 uint8_t cmd[16];
2616 uint8_t buf[36];
2617 int ret;
2619 memset(cmd, 0, sizeof(cmd));
2620 memset(buf, 0, sizeof(buf));
2621 cmd[0] = INQUIRY;
2622 cmd[4] = sizeof(buf);
2624 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2625 buf, sizeof(buf), s->qdev.io_timeout);
2626 if (ret < 0) {
2627 return -1;
2629 s->qdev.type = buf[0];
2630 if (buf[1] & 0x80) {
2631 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2633 return 0;
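/*
 * Editor's illustrative sketch (not part of the driver): the two standard
 * INQUIRY fields consumed by get_device_type() above.  Byte 0 bits 4:0 hold
 * the peripheral device type (0 = direct access, 5 = CD/DVD) and byte 1
 * bit 7 is the RMB (removable medium) flag.  Helper names are hypothetical.
 */
static inline int example_inquiry_device_type(const uint8_t *inq)
{
    return inq[0] & 0x1f;              /* peripheral device type */
}

static inline bool example_inquiry_is_removable(const uint8_t *inq)
{
    return (inq[1] & 0x80) != 0;       /* RMB bit */
}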
2636 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2638 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2639 AioContext *ctx;
2640 int sg_version;
2641 int rc;
2643 if (!s->qdev.conf.blk) {
2644 error_setg(errp, "drive property not set");
2645 return;
2648 if (s->rotation_rate) {
2649 error_report_once("rotation_rate is specified for scsi-block but is "
2650 "not implemented. This option is deprecated and will "
2651 "be removed in a future version");
2654 ctx = blk_get_aio_context(s->qdev.conf.blk);
2655 aio_context_acquire(ctx);
2657 /* check that we are using a driver that manages SG_IO (version 3 and later) */
2658 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2659 if (rc < 0) {
2660 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2661 if (rc != -EPERM) {
2662 error_append_hint(errp, "Is this a SCSI device?\n");
2664 goto out;
2666 if (sg_version < 30000) {
2667 error_setg(errp, "scsi generic interface too old");
2668 goto out;
2671 /* get device type from INQUIRY data */
2672 rc = get_device_type(s);
2673 if (rc < 0) {
2674 error_setg(errp, "INQUIRY failed");
2675 goto out;
2678 /* Make a guess for the block size; we'll fix it when the guest sends
2679 * READ CAPACITY.  If it doesn't, it would likely assume these sizes
2680 * anyway.  (TODO: check in /sys).
2681 */
2682 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2683 s->qdev.blocksize = 2048;
2684 } else {
2685 s->qdev.blocksize = 512;
2688 /* Prevent the scsi-block device from being ejected via the HMP and QMP
2689 * eject commands.
2690 */
2691 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2693 scsi_realize(&s->qdev, errp);
2694 scsi_generic_read_device_inquiry(&s->qdev);
2696 out:
2697 aio_context_release(ctx);
2700 typedef struct SCSIBlockReq {
2701 SCSIDiskReq req;
2702 sg_io_hdr_t io_header;
2704 /* Selected bytes of the original CDB, copied into our own CDB. */
2705 uint8_t cmd, cdb1, group_number;
2707 /* CDB passed to SG_IO. */
2708 uint8_t cdb[16];
2709 } SCSIBlockReq;
2711 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2712 int64_t offset, QEMUIOVector *iov,
2713 int direction,
2714 BlockCompletionFunc *cb, void *opaque)
2716 sg_io_hdr_t *io_header = &req->io_header;
2717 SCSIDiskReq *r = &req->req;
2718 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2719 int nb_logical_blocks;
2720 uint64_t lba;
2721 BlockAIOCB *aiocb;
2723 /* This is not supported yet.  It can only happen if the guest does
2724 * reads and writes that are not aligned to one logical sector
2725 * _and_ cover multiple MemoryRegions.
2726 */
2727 assert(offset % s->qdev.blocksize == 0);
2728 assert(iov->size % s->qdev.blocksize == 0);
2730 io_header->interface_id = 'S';
2732 /* The data transfer comes from the QEMUIOVector. */
2733 io_header->dxfer_direction = direction;
2734 io_header->dxfer_len = iov->size;
2735 io_header->dxferp = (void *)iov->iov;
2736 io_header->iovec_count = iov->niov;
2737 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2739 /* Build a new CDB with the LBA and length patched in, in case DMA
2740 * helpers split the transfer into multiple segments.  Do not build
2741 * a CDB smaller than what the guest wanted, and only build a larger
2742 * one if strictly necessary.
2743 */
2744 io_header->cmdp = req->cdb;
2745 lba = offset / s->qdev.blocksize;
2746 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2748 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2749 /* 6-byte CDB */
2750 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2751 req->cdb[4] = nb_logical_blocks;
2752 req->cdb[5] = 0;
2753 io_header->cmd_len = 6;
2754 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2755 /* 10-byte CDB */
2756 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2757 req->cdb[1] = req->cdb1;
2758 stl_be_p(&req->cdb[2], lba);
2759 req->cdb[6] = req->group_number;
2760 stw_be_p(&req->cdb[7], nb_logical_blocks);
2761 req->cdb[9] = 0;
2762 io_header->cmd_len = 10;
2763 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2764 /* 12-byte CDB */
2765 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2766 req->cdb[1] = req->cdb1;
2767 stl_be_p(&req->cdb[2], lba);
2768 stl_be_p(&req->cdb[6], nb_logical_blocks);
2769 req->cdb[10] = req->group_number;
2770 req->cdb[11] = 0;
2771 io_header->cmd_len = 12;
2772 } else {
2773 /* 16-byte CDB */
2774 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2775 req->cdb[1] = req->cdb1;
2776 stq_be_p(&req->cdb[2], lba);
2777 stl_be_p(&req->cdb[10], nb_logical_blocks);
2778 req->cdb[14] = req->group_number;
2779 req->cdb[15] = 0;
2780 io_header->cmd_len = 16;
2783 /* The rest is as in scsi-generic.c. */
2784 io_header->mx_sb_len = sizeof(r->req.sense);
2785 io_header->sbp = r->req.sense;
2786 io_header->timeout = s->qdev.io_timeout * 1000;
2787 io_header->usr_ptr = r;
2788 io_header->flags |= SG_FLAG_DIRECT_IO;
2789 trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
2790 nb_logical_blocks, io_header->timeout);
2791 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2792 assert(aiocb != NULL);
2793 return aiocb;
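/*
 * Editor's illustrative sketch (not part of the driver): the CDB size chosen
 * by scsi_block_do_sgio() above, expressed as a standalone helper.  A group-0
 * opcode whose LBA fits in 21 bits keeps the 6-byte form; group 0/1 opcodes
 * with a 32-bit LBA are rewritten as the 10-byte variant; anything except
 * group 4 with a 32-bit LBA becomes the 12-byte variant; otherwise the
 * 16-byte form is used.  The helper name is hypothetical.
 */
static inline int example_cdb_len_for(uint8_t cmd, uint64_t lba)
{
    if ((cmd >> 5) == 0 && lba <= 0x1ffff) {
        return 6;
    } else if ((cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        return 10;
    } else if ((cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        return 12;
    } else {
        return 16;
    }
}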
2796 static bool scsi_block_no_fua(SCSICommand *cmd)
2798 return false;
2801 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2802 QEMUIOVector *iov,
2803 BlockCompletionFunc *cb, void *cb_opaque,
2804 void *opaque)
2806 SCSIBlockReq *r = opaque;
2807 return scsi_block_do_sgio(r, offset, iov,
2808 SG_DXFER_FROM_DEV, cb, cb_opaque);
2811 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2812 QEMUIOVector *iov,
2813 BlockCompletionFunc *cb, void *cb_opaque,
2814 void *opaque)
2816 SCSIBlockReq *r = opaque;
2817 return scsi_block_do_sgio(r, offset, iov,
2818 SG_DXFER_TO_DEV, cb, cb_opaque);
2821 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2823 switch (buf[0]) {
2824 case VERIFY_10:
2825 case VERIFY_12:
2826 case VERIFY_16:
2827 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2828 * for the number of logical blocks specified in the length
2829 * field).  For other modes, do not use scatter/gather operation.
2830 */
2831 if ((buf[1] & 6) == 2) {
2832 return false;
2834 break;
2836 case READ_6:
2837 case READ_10:
2838 case READ_12:
2839 case READ_16:
2840 case WRITE_6:
2841 case WRITE_10:
2842 case WRITE_12:
2843 case WRITE_16:
2844 case WRITE_VERIFY_10:
2845 case WRITE_VERIFY_12:
2846 case WRITE_VERIFY_16:
2847 /* MMC writing cannot be done via DMA helpers, because it sometimes
2848 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2849 * We might use scsi_block_dma_reqops as long as no writing commands are
2850 * seen, but performance usually isn't paramount on optical media. So,
2851 * just make scsi-block operate the same as scsi-generic for them.
2852 */
2853 if (s->qdev.type != TYPE_ROM) {
2854 return false;
2856 break;
2858 default:
2859 break;
2862 return true;
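/*
 * Editor's worked example (not part of the driver): with the policy above, a
 * READ(10) to a scsi-block disk takes the DMA/SG_IO path, while the same
 * READ(10) on a device reporting TYPE_ROM, or a VERIFY command whose BYTCHK
 * field is not 01b, is handed to scsi_generic_req_ops unchanged.
 */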
2866 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2868 SCSIBlockReq *r = (SCSIBlockReq *)req;
2869 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2871 r->cmd = req->cmd.buf[0];
2872 switch (r->cmd >> 5) {
2873 case 0:
2874 /* 6-byte CDB. */
2875 r->cdb1 = r->group_number = 0;
2876 break;
2877 case 1:
2878 /* 10-byte CDB. */
2879 r->cdb1 = req->cmd.buf[1];
2880 r->group_number = req->cmd.buf[6];
2881 break;
2882 case 4:
2883 /* 16-byte CDB. */
2884 r->cdb1 = req->cmd.buf[1];
2885 r->group_number = req->cmd.buf[14];
2886 break;
2887 case 5:
2888 /* 12-byte CDB. */
2889 r->cdb1 = req->cmd.buf[1];
2890 r->group_number = req->cmd.buf[10];
2891 break;
2892 default:
2893 abort();
2896 /* Protection information is not supported. For SCSI versions 2 and
2897 * older (as determined by snooping the guest's INQUIRY commands),
2898 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2900 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
2901 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2902 return 0;
2905 r->req.status = &r->io_header.status;
2906 return scsi_disk_dma_command(req, buf);
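/*
 * Editor's worked example (not part of the driver): for a guest WRITE(16)
 * CDB (opcode 0x8a, group 4), cdb1 is copied from byte 1 (WRPROTECT/DPO/FUA
 * flags) and group_number from byte 14; scsi_block_do_sgio() later re-emits
 * these bytes in whichever CDB size it selects for each DMA segment.
 */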
2909 static const SCSIReqOps scsi_block_dma_reqops = {
2910 .size = sizeof(SCSIBlockReq),
2911 .free_req = scsi_free_request,
2912 .send_command = scsi_block_dma_command,
2913 .read_data = scsi_read_data,
2914 .write_data = scsi_write_data,
2915 .get_buf = scsi_get_buf,
2916 .load_request = scsi_disk_load_request,
2917 .save_request = scsi_disk_save_request,
2920 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2921 uint32_t lun, uint8_t *buf,
2922 void *hba_private)
2924 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2926 if (scsi_block_is_passthrough(s, buf)) {
2927 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2928 hba_private);
2929 } else {
2930 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2931 hba_private);
2935 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2936 uint8_t *buf, void *hba_private)
2938 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2940 if (scsi_block_is_passthrough(s, buf)) {
2941 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2942 } else {
2943 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2947 static void scsi_block_update_sense(SCSIRequest *req)
2949 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2950 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
2951 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
2953 #endif
2955 static
2956 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2957 BlockCompletionFunc *cb, void *cb_opaque,
2958 void *opaque)
2960 SCSIDiskReq *r = opaque;
2961 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2962 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2965 static
2966 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2967 BlockCompletionFunc *cb, void *cb_opaque,
2968 void *opaque)
2970 SCSIDiskReq *r = opaque;
2971 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2972 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2975 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2977 DeviceClass *dc = DEVICE_CLASS(klass);
2978 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2980 dc->fw_name = "disk";
2981 dc->reset = scsi_disk_reset;
2982 sdc->dma_readv = scsi_dma_readv;
2983 sdc->dma_writev = scsi_dma_writev;
2984 sdc->need_fua_emulation = scsi_is_cmd_fua;
2987 static const TypeInfo scsi_disk_base_info = {
2988 .name = TYPE_SCSI_DISK_BASE,
2989 .parent = TYPE_SCSI_DEVICE,
2990 .class_init = scsi_disk_base_class_initfn,
2991 .instance_size = sizeof(SCSIDiskState),
2992 .class_size = sizeof(SCSIDiskClass),
2993 .abstract = true,
2996 #define DEFINE_SCSI_DISK_PROPERTIES() \
2997 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
2998 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \
2999 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
3000 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
3001 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
3002 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
3003 DEFINE_PROP_STRING("product", SCSIDiskState, product), \
3004 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
3007 static Property scsi_hd_properties[] = {
3008 DEFINE_SCSI_DISK_PROPERTIES(),
3009 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3010 SCSI_DISK_F_REMOVABLE, false),
3011 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3012 SCSI_DISK_F_DPOFUA, false),
3013 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3014 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3015 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3016 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3017 DEFAULT_MAX_UNMAP_SIZE),
3018 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3019 DEFAULT_MAX_IO_SIZE),
3020 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3021 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3022 5),
3023 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
3024 DEFINE_PROP_END_OF_LIST(),
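/*
 * Editor's illustrative usage (values assumed for the example): the
 * properties above map directly to -device options, e.g.
 *   -drive if=none,id=disk0,file=test.qcow2
 *   -device scsi-hd,drive=disk0,serial=SN0001,rotation_rate=1
 * where rotation_rate=1 advertises a non-rotating (SSD) medium.
 */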
3027 static const VMStateDescription vmstate_scsi_disk_state = {
3028 .name = "scsi-disk",
3029 .version_id = 1,
3030 .minimum_version_id = 1,
3031 .fields = (VMStateField[]) {
3032 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
3033 VMSTATE_BOOL(media_changed, SCSIDiskState),
3034 VMSTATE_BOOL(media_event, SCSIDiskState),
3035 VMSTATE_BOOL(eject_request, SCSIDiskState),
3036 VMSTATE_BOOL(tray_open, SCSIDiskState),
3037 VMSTATE_BOOL(tray_locked, SCSIDiskState),
3038 VMSTATE_END_OF_LIST()
3042 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
3044 DeviceClass *dc = DEVICE_CLASS(klass);
3045 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3047 sc->realize = scsi_hd_realize;
3048 sc->unrealize = scsi_unrealize;
3049 sc->alloc_req = scsi_new_request;
3050 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3051 dc->desc = "virtual SCSI disk";
3052 device_class_set_props(dc, scsi_hd_properties);
3053 dc->vmsd = &vmstate_scsi_disk_state;
3056 static const TypeInfo scsi_hd_info = {
3057 .name = "scsi-hd",
3058 .parent = TYPE_SCSI_DISK_BASE,
3059 .class_init = scsi_hd_class_initfn,
3062 static Property scsi_cd_properties[] = {
3063 DEFINE_SCSI_DISK_PROPERTIES(),
3064 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3065 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3066 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3067 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3068 DEFAULT_MAX_IO_SIZE),
3069 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3070 5),
3071 DEFINE_PROP_END_OF_LIST(),
3074 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
3076 DeviceClass *dc = DEVICE_CLASS(klass);
3077 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3079 sc->realize = scsi_cd_realize;
3080 sc->alloc_req = scsi_new_request;
3081 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3082 dc->desc = "virtual SCSI CD-ROM";
3083 device_class_set_props(dc, scsi_cd_properties);
3084 dc->vmsd = &vmstate_scsi_disk_state;
3087 static const TypeInfo scsi_cd_info = {
3088 .name = "scsi-cd",
3089 .parent = TYPE_SCSI_DISK_BASE,
3090 .class_init = scsi_cd_class_initfn,
3093 #ifdef __linux__
3094 static Property scsi_block_properties[] = {
3095 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
3096 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
3097 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
3098 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3099 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3100 DEFAULT_MAX_UNMAP_SIZE),
3101 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3102 DEFAULT_MAX_IO_SIZE),
3103 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3104 -1),
3105 DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
3106 DEFAULT_IO_TIMEOUT),
3107 DEFINE_PROP_END_OF_LIST(),
3110 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3112 DeviceClass *dc = DEVICE_CLASS(klass);
3113 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3114 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3116 sc->realize = scsi_block_realize;
3117 sc->alloc_req = scsi_block_new_request;
3118 sc->parse_cdb = scsi_block_parse_cdb;
3119 sdc->dma_readv = scsi_block_dma_readv;
3120 sdc->dma_writev = scsi_block_dma_writev;
3121 sdc->update_sense = scsi_block_update_sense;
3122 sdc->need_fua_emulation = scsi_block_no_fua;
3123 dc->desc = "SCSI block device passthrough";
3124 device_class_set_props(dc, scsi_block_properties);
3125 dc->vmsd = &vmstate_scsi_disk_state;
3128 static const TypeInfo scsi_block_info = {
3129 .name = "scsi-block",
3130 .parent = TYPE_SCSI_DISK_BASE,
3131 .class_init = scsi_block_class_initfn,
3133 #endif
3135 static Property scsi_disk_properties[] = {
3136 DEFINE_SCSI_DISK_PROPERTIES(),
3137 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3138 SCSI_DISK_F_REMOVABLE, false),
3139 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3140 SCSI_DISK_F_DPOFUA, false),
3141 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3142 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3143 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3144 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3145 DEFAULT_MAX_UNMAP_SIZE),
3146 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3147 DEFAULT_MAX_IO_SIZE),
3148 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3149 5),
3150 DEFINE_PROP_END_OF_LIST(),
3153 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
3155 DeviceClass *dc = DEVICE_CLASS(klass);
3156 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3158 sc->realize = scsi_disk_realize;
3159 sc->alloc_req = scsi_new_request;
3160 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3161 dc->fw_name = "disk";
3162 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
3163 dc->reset = scsi_disk_reset;
3164 device_class_set_props(dc, scsi_disk_properties);
3165 dc->vmsd = &vmstate_scsi_disk_state;
3168 static const TypeInfo scsi_disk_info = {
3169 .name = "scsi-disk",
3170 .parent = TYPE_SCSI_DISK_BASE,
3171 .class_init = scsi_disk_class_initfn,
3174 static void scsi_disk_register_types(void)
3176 type_register_static(&scsi_disk_base_info);
3177 type_register_static(&scsi_hd_info);
3178 type_register_static(&scsi_cd_info);
3179 #ifdef __linux__
3180 type_register_static(&scsi_block_info);
3181 #endif
3182 type_register_static(&scsi_disk_info);
3185 type_init(scsi_disk_register_types)