scsi-disk: change disk serial length from 20 to 36
[qemu/ar7.git] / hw / scsi / scsi-disk.c
blob77cba31e30cf5e5dc31e1a6923a30131ffdeeca6
1 /*
2 * SCSI Device emulation
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
7 * Written by Paul Brook
8 * Modifications:
 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
15 * This code is licensed under the LGPL.
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
/* Define DEBUG_SCSI at build time to get verbose request tracing on stdout. */
//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
/* Debug trace helper: prefixes every message with "scsi-disk: ". */
#define DPRINTF(fmt, ...) \
do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#else
/* Compiled out entirely when debugging is disabled. */
#define DPRINTF(fmt, ...) do {} while(0)
#endif
31 #include "qemu/osdep.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34 #include "hw/scsi/scsi.h"
35 #include "block/scsi.h"
36 #include "sysemu/sysemu.h"
37 #include "sysemu/block-backend.h"
38 #include "sysemu/blockdev.h"
39 #include "hw/block/block.h"
40 #include "sysemu/dma.h"
41 #include "qemu/cutils.h"
43 #ifdef __linux
44 #include <scsi/sg.h>
45 #endif
/* Upper bound on the payload of a single emulated WRITE SAME, in bytes. */
#define SCSI_WRITE_SAME_MAX 524288
/* Size of the bounce buffer used for non-scatter/gather transfers (bytes). */
#define SCSI_DMA_BUF_SIZE 131072
/* Largest INQUIRY / MODE SENSE responses we will produce, in bytes. */
#define SCSI_MAX_INQUIRY_LEN 256
#define SCSI_MAX_MODE_LEN 256

#define DEFAULT_DISCARD_GRANULARITY 4096
#define DEFAULT_MAX_UNMAP_SIZE (1 << 30) /* 1 GB */
#define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
#define TYPE_SCSI_DISK_BASE "scsi-disk-base"

/* QOM cast helpers for the abstract scsi-disk-base type. */
#define SCSI_DISK_BASE(obj) \
OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
/* Class of the scsi-disk-base device family. */
typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    /* Backing-store read/write routines used by scsi_do_read/scsi_write_data;
     * presumably overridden by subclasses — confirm against the type
     * registrations elsewhere in the file. */
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    /* Returns true if the FUA bit of cmd must be emulated with a flush. */
    bool (*need_fua_emulation)(SCSICommand *cmd);
} SCSIDiskClass;
/* Per-request state for the emulated disk. */
typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    /* Size in bytes of the bounce buffer at iov.iov_base (see
     * scsi_init_iovec). */
    uint32_t buflen;
    /* Set once the first data transfer of the request has been issued. */
    bool started;
    bool need_fua_emulation;
    /* Bounce buffer used when the request has no scatter/gather list. */
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    /* Optional SCSI status byte; when non-NULL and non-zero the request is
     * completed with that status (see scsi_disk_req_check_error). */
    unsigned char *status;
} SCSIDiskReq;
/* Bit numbers for SCSIDiskState::features. */
#define SCSI_DISK_F_REMOVABLE 0
#define SCSI_DISK_F_DPOFUA 1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
/* Device state of an emulated SCSI disk or CD-ROM. */
typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    /* Bitmask of SCSI_DISK_F_* flags. */
    uint32_t features;
    bool media_changed;
    /* One-shot flags reported (and cleared) by GET EVENT STATUS NOTIFICATION;
     * see scsi_event_status_media. */
    bool media_event;
    bool eject_request;
    /* Relative target port reported in the device identification VPD page. */
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    /* Strings reported by INQUIRY. */
    char *version;
    char *serial;
    char *vendor;
    char *product;
    bool tray_open;
    bool tray_locked;
} SCSIDiskState;

static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
111 static void scsi_free_request(SCSIRequest *req)
113 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
115 qemu_vfree(r->iov.iov_base);
118 /* Helper function for command completion with sense. */
119 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
121 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
122 r->req.tag, sense.key, sense.asc, sense.ascq);
123 scsi_req_build_sense(&r->req, sense);
124 scsi_req_complete(&r->req, CHECK_CONDITION);
127 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
129 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
131 if (!r->iov.iov_base) {
132 r->buflen = size;
133 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
135 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
136 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
139 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
143 qemu_put_be64s(f, &r->sector);
144 qemu_put_be32s(f, &r->sector_count);
145 qemu_put_be32s(f, &r->buflen);
146 if (r->buflen) {
147 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
148 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
149 } else if (!req->retry) {
150 uint32_t len = r->iov.iov_len;
151 qemu_put_be32s(f, &len);
152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
157 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
159 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
161 qemu_get_be64s(f, &r->sector);
162 qemu_get_be32s(f, &r->sector_count);
163 qemu_get_be32s(f, &r->buflen);
164 if (r->buflen) {
165 scsi_init_iovec(r, r->buflen);
166 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
167 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
168 } else if (!r->req.retry) {
169 uint32_t len;
170 qemu_get_be32s(f, &len);
171 r->iov.iov_len = len;
172 assert(r->iov.iov_len <= r->buflen);
173 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
177 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
180 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
182 if (r->req.io_canceled) {
183 scsi_req_cancel_complete(&r->req);
184 return true;
187 if (ret < 0) {
188 return scsi_handle_rw_error(r, -ret, acct_failed);
191 if (r->status && *r->status) {
192 if (acct_failed) {
193 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
194 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
196 scsi_req_complete(&r->req, *r->status);
197 return true;
200 return false;
203 static void scsi_aio_complete(void *opaque, int ret)
205 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
208 assert(r->req.aiocb != NULL);
209 r->req.aiocb = NULL;
210 if (scsi_disk_req_check_error(r, ret, true)) {
211 goto done;
214 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
215 scsi_req_complete(&r->req, GOOD);
217 done:
218 scsi_req_unref(&r->req);
221 static bool scsi_is_cmd_fua(SCSICommand *cmd)
223 switch (cmd->buf[0]) {
224 case READ_10:
225 case READ_12:
226 case READ_16:
227 case WRITE_10:
228 case WRITE_12:
229 case WRITE_16:
230 return (cmd->buf[1] & 8) != 0;
232 case VERIFY_10:
233 case VERIFY_12:
234 case VERIFY_16:
235 case WRITE_VERIFY_10:
236 case WRITE_VERIFY_12:
237 case WRITE_VERIFY_16:
238 return true;
240 case READ_6:
241 case WRITE_6:
242 default:
243 return false;
247 static void scsi_write_do_fua(SCSIDiskReq *r)
249 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
251 assert(r->req.aiocb == NULL);
252 assert(!r->req.io_canceled);
254 if (r->need_fua_emulation) {
255 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
256 BLOCK_ACCT_FLUSH);
257 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
258 return;
261 scsi_req_complete(&r->req, GOOD);
262 scsi_req_unref(&r->req);
265 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
267 assert(r->req.aiocb == NULL);
268 if (scsi_disk_req_check_error(r, ret, false)) {
269 goto done;
272 r->sector += r->sector_count;
273 r->sector_count = 0;
274 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
275 scsi_write_do_fua(r);
276 return;
277 } else {
278 scsi_req_complete(&r->req, GOOD);
281 done:
282 scsi_req_unref(&r->req);
285 static void scsi_dma_complete(void *opaque, int ret)
287 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
288 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
290 assert(r->req.aiocb != NULL);
291 r->req.aiocb = NULL;
293 if (ret < 0) {
294 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
295 } else {
296 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
298 scsi_dma_complete_noio(r, ret);
301 static void scsi_read_complete(void * opaque, int ret)
303 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
304 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
305 int n;
307 assert(r->req.aiocb != NULL);
308 r->req.aiocb = NULL;
309 if (scsi_disk_req_check_error(r, ret, true)) {
310 goto done;
313 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
314 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);
316 n = r->qiov.size / 512;
317 r->sector += n;
318 r->sector_count -= n;
319 scsi_req_data(&r->req, r->qiov.size);
321 done:
322 scsi_req_unref(&r->req);
325 /* Actually issue a read to the block device. */
326 static void scsi_do_read(SCSIDiskReq *r, int ret)
328 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
329 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
331 assert (r->req.aiocb == NULL);
332 if (scsi_disk_req_check_error(r, ret, false)) {
333 goto done;
336 /* The request is used as the AIO opaque value, so add a ref. */
337 scsi_req_ref(&r->req);
339 if (r->req.sg) {
340 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
341 r->req.resid -= r->req.sg->size;
342 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
343 r->req.sg, r->sector << BDRV_SECTOR_BITS,
344 sdc->dma_readv, r, scsi_dma_complete, r,
345 DMA_DIRECTION_FROM_DEVICE);
346 } else {
347 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
348 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
349 r->qiov.size, BLOCK_ACCT_READ);
350 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
351 scsi_read_complete, r, r);
354 done:
355 scsi_req_unref(&r->req);
358 static void scsi_do_read_cb(void *opaque, int ret)
360 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
361 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
363 assert (r->req.aiocb != NULL);
364 r->req.aiocb = NULL;
366 if (ret < 0) {
367 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
368 } else {
369 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
371 scsi_do_read(opaque, ret);
374 /* Read more data from scsi device into buffer. */
375 static void scsi_read_data(SCSIRequest *req)
377 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
378 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
379 bool first;
381 DPRINTF("Read sector_count=%d\n", r->sector_count);
382 if (r->sector_count == 0) {
383 /* This also clears the sense buffer for REQUEST SENSE. */
384 scsi_req_complete(&r->req, GOOD);
385 return;
388 /* No data transfer may already be in progress */
389 assert(r->req.aiocb == NULL);
391 /* The request is used as the AIO opaque value, so add a ref. */
392 scsi_req_ref(&r->req);
393 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
394 DPRINTF("Data transfer direction invalid\n");
395 scsi_read_complete(r, -EINVAL);
396 return;
399 if (s->tray_open) {
400 scsi_read_complete(r, -ENOMEDIUM);
401 return;
404 first = !r->started;
405 r->started = true;
406 if (first && r->need_fua_emulation) {
407 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
408 BLOCK_ACCT_FLUSH);
409 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
410 } else {
411 scsi_do_read(r, 0);
416 * scsi_handle_rw_error has two return values. 0 means that the error
417 * must be ignored, 1 means that the error has been processed and the
418 * caller should not do anything else for this request. Note that
419 * scsi_handle_rw_error always manages its reference counts, independent
420 * of the return value.
422 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
424 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
425 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
426 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
427 is_read, error);
429 if (action == BLOCK_ERROR_ACTION_REPORT) {
430 if (acct_failed) {
431 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
433 switch (error) {
434 case ENOMEDIUM:
435 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
436 break;
437 case ENOMEM:
438 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
439 break;
440 case EINVAL:
441 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
442 break;
443 case ENOSPC:
444 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
445 break;
446 default:
447 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
448 break;
451 blk_error_action(s->qdev.conf.blk, action, is_read, error);
452 if (action == BLOCK_ERROR_ACTION_STOP) {
453 scsi_req_retry(&r->req);
455 return action != BLOCK_ERROR_ACTION_IGNORE;
458 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
460 uint32_t n;
462 assert (r->req.aiocb == NULL);
463 if (scsi_disk_req_check_error(r, ret, false)) {
464 goto done;
467 n = r->qiov.size / 512;
468 r->sector += n;
469 r->sector_count -= n;
470 if (r->sector_count == 0) {
471 scsi_write_do_fua(r);
472 return;
473 } else {
474 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
475 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
476 scsi_req_data(&r->req, r->qiov.size);
479 done:
480 scsi_req_unref(&r->req);
483 static void scsi_write_complete(void * opaque, int ret)
485 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
486 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
488 assert (r->req.aiocb != NULL);
489 r->req.aiocb = NULL;
491 if (ret < 0) {
492 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
493 } else {
494 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
496 scsi_write_complete_noio(r, ret);
499 static void scsi_write_data(SCSIRequest *req)
501 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
502 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
503 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
505 /* No data transfer may already be in progress */
506 assert(r->req.aiocb == NULL);
508 /* The request is used as the AIO opaque value, so add a ref. */
509 scsi_req_ref(&r->req);
510 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
511 DPRINTF("Data transfer direction invalid\n");
512 scsi_write_complete_noio(r, -EINVAL);
513 return;
516 if (!r->req.sg && !r->qiov.size) {
517 /* Called for the first time. Ask the driver to send us more data. */
518 r->started = true;
519 scsi_write_complete_noio(r, 0);
520 return;
522 if (s->tray_open) {
523 scsi_write_complete_noio(r, -ENOMEDIUM);
524 return;
527 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
528 r->req.cmd.buf[0] == VERIFY_16) {
529 if (r->req.sg) {
530 scsi_dma_complete_noio(r, 0);
531 } else {
532 scsi_write_complete_noio(r, 0);
534 return;
537 if (r->req.sg) {
538 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
539 r->req.resid -= r->req.sg->size;
540 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
541 r->req.sg, r->sector << BDRV_SECTOR_BITS,
542 sdc->dma_writev, r, scsi_dma_complete, r,
543 DMA_DIRECTION_TO_DEVICE);
544 } else {
545 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
546 r->qiov.size, BLOCK_ACCT_WRITE);
547 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
548 scsi_write_complete, r, r);
552 /* Return a pointer to the data buffer. */
553 static uint8_t *scsi_get_buf(SCSIRequest *req)
555 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
557 return (uint8_t *)r->iov.iov_base;
560 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
562 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
563 int buflen = 0;
564 int start;
566 if (req->cmd.buf[1] & 0x1) {
567 /* Vital product data */
568 uint8_t page_code = req->cmd.buf[2];
570 outbuf[buflen++] = s->qdev.type & 0x1f;
571 outbuf[buflen++] = page_code ; // this page
572 outbuf[buflen++] = 0x00;
573 outbuf[buflen++] = 0x00;
574 start = buflen;
576 switch (page_code) {
577 case 0x00: /* Supported page codes, mandatory */
579 DPRINTF("Inquiry EVPD[Supported pages] "
580 "buffer size %zd\n", req->cmd.xfer);
581 outbuf[buflen++] = 0x00; // list of supported pages (this page)
582 if (s->serial) {
583 outbuf[buflen++] = 0x80; // unit serial number
585 outbuf[buflen++] = 0x83; // device identification
586 if (s->qdev.type == TYPE_DISK) {
587 outbuf[buflen++] = 0xb0; // block limits
588 outbuf[buflen++] = 0xb2; // thin provisioning
590 break;
592 case 0x80: /* Device serial number, optional */
594 int l;
596 if (!s->serial) {
597 DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
598 return -1;
601 l = strlen(s->serial);
602 if (l > 36) {
603 l = 36;
606 DPRINTF("Inquiry EVPD[Serial number] "
607 "buffer size %zd\n", req->cmd.xfer);
608 memcpy(outbuf+buflen, s->serial, l);
609 buflen += l;
610 break;
613 case 0x83: /* Device identification page, mandatory */
615 const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
616 int max_len = s->serial ? 20 : 255 - 8;
617 int id_len = strlen(str);
619 if (id_len > max_len) {
620 id_len = max_len;
622 DPRINTF("Inquiry EVPD[Device identification] "
623 "buffer size %zd\n", req->cmd.xfer);
625 outbuf[buflen++] = 0x2; // ASCII
626 outbuf[buflen++] = 0; // not officially assigned
627 outbuf[buflen++] = 0; // reserved
628 outbuf[buflen++] = id_len; // length of data following
629 memcpy(outbuf+buflen, str, id_len);
630 buflen += id_len;
632 if (s->qdev.wwn) {
633 outbuf[buflen++] = 0x1; // Binary
634 outbuf[buflen++] = 0x3; // NAA
635 outbuf[buflen++] = 0; // reserved
636 outbuf[buflen++] = 8;
637 stq_be_p(&outbuf[buflen], s->qdev.wwn);
638 buflen += 8;
641 if (s->qdev.port_wwn) {
642 outbuf[buflen++] = 0x61; // SAS / Binary
643 outbuf[buflen++] = 0x93; // PIV / Target port / NAA
644 outbuf[buflen++] = 0; // reserved
645 outbuf[buflen++] = 8;
646 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
647 buflen += 8;
650 if (s->port_index) {
651 outbuf[buflen++] = 0x61; // SAS / Binary
652 outbuf[buflen++] = 0x94; // PIV / Target port / relative target port
653 outbuf[buflen++] = 0; // reserved
654 outbuf[buflen++] = 4;
655 stw_be_p(&outbuf[buflen + 2], s->port_index);
656 buflen += 4;
658 break;
660 case 0xb0: /* block limits */
662 unsigned int unmap_sectors =
663 s->qdev.conf.discard_granularity / s->qdev.blocksize;
664 unsigned int min_io_size =
665 s->qdev.conf.min_io_size / s->qdev.blocksize;
666 unsigned int opt_io_size =
667 s->qdev.conf.opt_io_size / s->qdev.blocksize;
668 unsigned int max_unmap_sectors =
669 s->max_unmap_size / s->qdev.blocksize;
670 unsigned int max_io_sectors =
671 s->max_io_size / s->qdev.blocksize;
673 if (s->qdev.type == TYPE_ROM) {
674 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
675 page_code);
676 return -1;
678 /* required VPD size with unmap support */
679 buflen = 0x40;
680 memset(outbuf + 4, 0, buflen - 4);
682 outbuf[4] = 0x1; /* wsnz */
684 /* optimal transfer length granularity */
685 outbuf[6] = (min_io_size >> 8) & 0xff;
686 outbuf[7] = min_io_size & 0xff;
688 /* maximum transfer length */
689 outbuf[8] = (max_io_sectors >> 24) & 0xff;
690 outbuf[9] = (max_io_sectors >> 16) & 0xff;
691 outbuf[10] = (max_io_sectors >> 8) & 0xff;
692 outbuf[11] = max_io_sectors & 0xff;
694 /* optimal transfer length */
695 outbuf[12] = (opt_io_size >> 24) & 0xff;
696 outbuf[13] = (opt_io_size >> 16) & 0xff;
697 outbuf[14] = (opt_io_size >> 8) & 0xff;
698 outbuf[15] = opt_io_size & 0xff;
700 /* max unmap LBA count, default is 1GB */
701 outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
702 outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
703 outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
704 outbuf[23] = max_unmap_sectors & 0xff;
706 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header. */
707 outbuf[24] = 0;
708 outbuf[25] = 0;
709 outbuf[26] = 0;
710 outbuf[27] = 255;
712 /* optimal unmap granularity */
713 outbuf[28] = (unmap_sectors >> 24) & 0xff;
714 outbuf[29] = (unmap_sectors >> 16) & 0xff;
715 outbuf[30] = (unmap_sectors >> 8) & 0xff;
716 outbuf[31] = unmap_sectors & 0xff;
718 /* max write same size */
719 outbuf[36] = 0;
720 outbuf[37] = 0;
721 outbuf[38] = 0;
722 outbuf[39] = 0;
724 outbuf[40] = (max_io_sectors >> 24) & 0xff;
725 outbuf[41] = (max_io_sectors >> 16) & 0xff;
726 outbuf[42] = (max_io_sectors >> 8) & 0xff;
727 outbuf[43] = max_io_sectors & 0xff;
728 break;
730 case 0xb2: /* thin provisioning */
732 buflen = 8;
733 outbuf[4] = 0;
734 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
735 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
736 outbuf[7] = 0;
737 break;
739 default:
740 return -1;
742 /* done with EVPD */
743 assert(buflen - start <= 255);
744 outbuf[start - 1] = buflen - start;
745 return buflen;
748 /* Standard INQUIRY data */
749 if (req->cmd.buf[2] != 0) {
750 return -1;
753 /* PAGE CODE == 0 */
754 buflen = req->cmd.xfer;
755 if (buflen > SCSI_MAX_INQUIRY_LEN) {
756 buflen = SCSI_MAX_INQUIRY_LEN;
759 outbuf[0] = s->qdev.type & 0x1f;
760 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
762 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
763 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
765 memset(&outbuf[32], 0, 4);
766 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
768 * We claim conformance to SPC-3, which is required for guests
769 * to ask for modern features like READ CAPACITY(16) or the
770 * block characteristics VPD page by default. Not all of SPC-3
771 * is actually implemented, but we're good enough.
773 outbuf[2] = 5;
774 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
776 if (buflen > 36) {
777 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
778 } else {
779 /* If the allocation length of CDB is too small,
780 the additional length is not adjusted */
781 outbuf[4] = 36 - 5;
784 /* Sync data transfer and TCQ. */
785 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
786 return buflen;
789 static inline bool media_is_dvd(SCSIDiskState *s)
791 uint64_t nb_sectors;
792 if (s->qdev.type != TYPE_ROM) {
793 return false;
795 if (!blk_is_inserted(s->qdev.conf.blk)) {
796 return false;
798 if (s->tray_open) {
799 return false;
801 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
802 return nb_sectors > CD_MAX_SECTORS;
805 static inline bool media_is_cd(SCSIDiskState *s)
807 uint64_t nb_sectors;
808 if (s->qdev.type != TYPE_ROM) {
809 return false;
811 if (!blk_is_inserted(s->qdev.conf.blk)) {
812 return false;
814 if (s->tray_open) {
815 return false;
817 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
818 return nb_sectors <= CD_MAX_SECTORS;
821 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
822 uint8_t *outbuf)
824 uint8_t type = r->req.cmd.buf[1] & 7;
826 if (s->qdev.type != TYPE_ROM) {
827 return -1;
830 /* Types 1/2 are only defined for Blu-Ray. */
831 if (type != 0) {
832 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
833 return -1;
836 memset(outbuf, 0, 34);
837 outbuf[1] = 32;
838 outbuf[2] = 0xe; /* last session complete, disc finalized */
839 outbuf[3] = 1; /* first track on disc */
840 outbuf[4] = 1; /* # of sessions */
841 outbuf[5] = 1; /* first track of last session */
842 outbuf[6] = 1; /* last track of last session */
843 outbuf[7] = 0x20; /* unrestricted use */
844 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
845 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
846 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
847 /* 24-31: disc bar code */
848 /* 32: disc application code */
849 /* 33: number of OPC tables */
851 return 34;
854 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
855 uint8_t *outbuf)
857 static const int rds_caps_size[5] = {
858 [0] = 2048 + 4,
859 [1] = 4 + 4,
860 [3] = 188 + 4,
861 [4] = 2048 + 4,
864 uint8_t media = r->req.cmd.buf[1];
865 uint8_t layer = r->req.cmd.buf[6];
866 uint8_t format = r->req.cmd.buf[7];
867 int size = -1;
869 if (s->qdev.type != TYPE_ROM) {
870 return -1;
872 if (media != 0) {
873 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
874 return -1;
877 if (format != 0xff) {
878 if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
879 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
880 return -1;
882 if (media_is_cd(s)) {
883 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
884 return -1;
886 if (format >= ARRAY_SIZE(rds_caps_size)) {
887 return -1;
889 size = rds_caps_size[format];
890 memset(outbuf, 0, size);
893 switch (format) {
894 case 0x00: {
895 /* Physical format information */
896 uint64_t nb_sectors;
897 if (layer != 0) {
898 goto fail;
900 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
902 outbuf[4] = 1; /* DVD-ROM, part version 1 */
903 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
904 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
905 outbuf[7] = 0; /* default densities */
907 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
908 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
909 break;
912 case 0x01: /* DVD copyright information, all zeros */
913 break;
915 case 0x03: /* BCA information - invalid field for no BCA info */
916 return -1;
918 case 0x04: /* DVD disc manufacturing information, all zeros */
919 break;
921 case 0xff: { /* List capabilities */
922 int i;
923 size = 4;
924 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
925 if (!rds_caps_size[i]) {
926 continue;
928 outbuf[size] = i;
929 outbuf[size + 1] = 0x40; /* Not writable, readable */
930 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
931 size += 4;
933 break;
936 default:
937 return -1;
940 /* Size of buffer, not including 2 byte size field */
941 stw_be_p(outbuf, size - 2);
942 return size;
944 fail:
945 return -1;
948 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
950 uint8_t event_code, media_status;
952 media_status = 0;
953 if (s->tray_open) {
954 media_status = MS_TRAY_OPEN;
955 } else if (blk_is_inserted(s->qdev.conf.blk)) {
956 media_status = MS_MEDIA_PRESENT;
959 /* Event notification descriptor */
960 event_code = MEC_NO_CHANGE;
961 if (media_status != MS_TRAY_OPEN) {
962 if (s->media_event) {
963 event_code = MEC_NEW_MEDIA;
964 s->media_event = false;
965 } else if (s->eject_request) {
966 event_code = MEC_EJECT_REQUESTED;
967 s->eject_request = false;
971 outbuf[0] = event_code;
972 outbuf[1] = media_status;
974 /* These fields are reserved, just clear them. */
975 outbuf[2] = 0;
976 outbuf[3] = 0;
977 return 4;
980 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
981 uint8_t *outbuf)
983 int size;
984 uint8_t *buf = r->req.cmd.buf;
985 uint8_t notification_class_request = buf[4];
986 if (s->qdev.type != TYPE_ROM) {
987 return -1;
989 if ((buf[1] & 1) == 0) {
990 /* asynchronous */
991 return -1;
994 size = 4;
995 outbuf[0] = outbuf[1] = 0;
996 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
997 if (notification_class_request & (1 << GESN_MEDIA)) {
998 outbuf[2] = GESN_MEDIA;
999 size += scsi_event_status_media(s, &outbuf[size]);
1000 } else {
1001 outbuf[2] = 0x80;
1003 stw_be_p(outbuf, size - 4);
1004 return size;
1007 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1009 int current;
1011 if (s->qdev.type != TYPE_ROM) {
1012 return -1;
1015 if (media_is_dvd(s)) {
1016 current = MMC_PROFILE_DVD_ROM;
1017 } else if (media_is_cd(s)) {
1018 current = MMC_PROFILE_CD_ROM;
1019 } else {
1020 current = MMC_PROFILE_NONE;
1023 memset(outbuf, 0, 40);
1024 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1025 stw_be_p(&outbuf[6], current);
1026 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1027 outbuf[10] = 0x03; /* persistent, current */
1028 outbuf[11] = 8; /* two profiles */
1029 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1030 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1031 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1032 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1033 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1034 stw_be_p(&outbuf[20], 1);
1035 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1036 outbuf[23] = 8;
1037 stl_be_p(&outbuf[24], 1); /* SCSI */
1038 outbuf[28] = 1; /* DBE = 1, mandatory */
1039 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1040 stw_be_p(&outbuf[32], 3);
1041 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1042 outbuf[35] = 4;
1043 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1044 /* TODO: Random readable, CD read, DVD read, drive serial number,
1045 power management */
1046 return 40;
1049 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1051 if (s->qdev.type != TYPE_ROM) {
1052 return -1;
1054 memset(outbuf, 0, 8);
1055 outbuf[5] = 1; /* CD-ROM */
1056 return 8;
1059 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1060 int page_control)
1062 static const int mode_sense_valid[0x3f] = {
1063 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1064 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1065 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1066 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1067 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1068 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1071 uint8_t *p = *p_outbuf + 2;
1072 int length;
1074 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1075 return -1;
1079 * If Changeable Values are requested, a mask denoting those mode parameters
1080 * that are changeable shall be returned. As we currently don't support
1081 * parameter changes via MODE_SELECT all bits are returned set to zero.
1082 * The buffer was already menset to zero by the caller of this function.
1084 * The offsets here are off by two compared to the descriptions in the
1085 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1086 * but it is done so that offsets are consistent within our implementation
1087 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1088 * 2-byte and 4-byte headers.
1090 switch (page) {
1091 case MODE_PAGE_HD_GEOMETRY:
1092 length = 0x16;
1093 if (page_control == 1) { /* Changeable Values */
1094 break;
1096 /* if a geometry hint is available, use it */
1097 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1098 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1099 p[2] = s->qdev.conf.cyls & 0xff;
1100 p[3] = s->qdev.conf.heads & 0xff;
1101 /* Write precomp start cylinder, disabled */
1102 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1103 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1104 p[6] = s->qdev.conf.cyls & 0xff;
1105 /* Reduced current start cylinder, disabled */
1106 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1107 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1108 p[9] = s->qdev.conf.cyls & 0xff;
1109 /* Device step rate [ns], 200ns */
1110 p[10] = 0;
1111 p[11] = 200;
1112 /* Landing zone cylinder */
1113 p[12] = 0xff;
1114 p[13] = 0xff;
1115 p[14] = 0xff;
1116 /* Medium rotation rate [rpm], 5400 rpm */
1117 p[18] = (5400 >> 8) & 0xff;
1118 p[19] = 5400 & 0xff;
1119 break;
1121 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1122 length = 0x1e;
1123 if (page_control == 1) { /* Changeable Values */
1124 break;
1126 /* Transfer rate [kbit/s], 5Mbit/s */
1127 p[0] = 5000 >> 8;
1128 p[1] = 5000 & 0xff;
1129 /* if a geometry hint is available, use it */
1130 p[2] = s->qdev.conf.heads & 0xff;
1131 p[3] = s->qdev.conf.secs & 0xff;
1132 p[4] = s->qdev.blocksize >> 8;
1133 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1134 p[7] = s->qdev.conf.cyls & 0xff;
1135 /* Write precomp start cylinder, disabled */
1136 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1137 p[9] = s->qdev.conf.cyls & 0xff;
1138 /* Reduced current start cylinder, disabled */
1139 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1140 p[11] = s->qdev.conf.cyls & 0xff;
1141 /* Device step rate [100us], 100us */
1142 p[12] = 0;
1143 p[13] = 1;
1144 /* Device step pulse width [us], 1us */
1145 p[14] = 1;
1146 /* Device head settle delay [100us], 100us */
1147 p[15] = 0;
1148 p[16] = 1;
1149 /* Motor on delay [0.1s], 0.1s */
1150 p[17] = 1;
1151 /* Motor off delay [0.1s], 0.1s */
1152 p[18] = 1;
1153 /* Medium rotation rate [rpm], 5400 rpm */
1154 p[26] = (5400 >> 8) & 0xff;
1155 p[27] = 5400 & 0xff;
1156 break;
1158 case MODE_PAGE_CACHING:
1159 length = 0x12;
1160 if (page_control == 1 || /* Changeable Values */
1161 blk_enable_write_cache(s->qdev.conf.blk)) {
1162 p[0] = 4; /* WCE */
1164 break;
1166 case MODE_PAGE_R_W_ERROR:
1167 length = 10;
1168 if (page_control == 1) { /* Changeable Values */
1169 break;
1171 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1172 if (s->qdev.type == TYPE_ROM) {
1173 p[1] = 0x20; /* Read Retry Count */
1175 break;
1177 case MODE_PAGE_AUDIO_CTL:
1178 length = 14;
1179 break;
1181 case MODE_PAGE_CAPABILITIES:
1182 length = 0x14;
1183 if (page_control == 1) { /* Changeable Values */
1184 break;
1187 p[0] = 0x3b; /* CD-R & CD-RW read */
1188 p[1] = 0; /* Writing not supported */
1189 p[2] = 0x7f; /* Audio, composite, digital out,
1190 mode 2 form 1&2, multi session */
1191 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1192 RW corrected, C2 errors, ISRC,
1193 UPC, Bar code */
1194 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1195 /* Locking supported, jumper present, eject, tray */
1196 p[5] = 0; /* no volume & mute control, no
1197 changer */
1198 p[6] = (50 * 176) >> 8; /* 50x read speed */
1199 p[7] = (50 * 176) & 0xff;
1200 p[8] = 2 >> 8; /* Two volume levels */
1201 p[9] = 2 & 0xff;
1202 p[10] = 2048 >> 8; /* 2M buffer */
1203 p[11] = 2048 & 0xff;
1204 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1205 p[13] = (16 * 176) & 0xff;
1206 p[16] = (16 * 176) >> 8; /* 16x write speed */
1207 p[17] = (16 * 176) & 0xff;
1208 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1209 p[19] = (16 * 176) & 0xff;
1210 break;
1212 default:
1213 return -1;
1216 assert(length < 256);
1217 (*p_outbuf)[0] = page;
1218 (*p_outbuf)[1] = length;
1219 *p_outbuf += length + 2;
1220 return length + 2;
/*
 * Build the MODE SENSE (6/10) response in @outbuf: mode parameter header,
 * optional short block descriptor, then the requested mode page(s).
 * Returns the total response length, or -1 after queuing a check
 * condition for an unsupported page or PC field.
 */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;                       /* DBD bit: suppress block descriptors */
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
    DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
        (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    /* Mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10). */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        /* The short block descriptor holds only a 24-bit block count;
         * 0 means "capacity too large to report here". */
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        /* 0x3f means "return all supported pages"; unsupported pages
         * simply contribute nothing. */
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}
/*
 * Emulate READ TOC/PMA/ATIP for the CD-ROM personality.  Only formats 0
 * (TOC), 1 (multi-session) and 2 (raw TOC) are supported; anything else
 * returns -1 so the caller reports an illegal request.  Returns the
 * response length on success.
 */
static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;          /* MSF bit: time- vs LBA-addressing */
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}
/*
 * Emulate START STOP UNIT.  For removable devices with LOEJ set, loads or
 * ejects the medium (honoring the tray lock).  Returns 0 on success, -1
 * after queuing a check condition when removal is prevented.
 */
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        /* Refuse to eject while the guest holds a PREVENT MEDIUM REMOVAL
         * lock on a closed tray. */
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}
1381 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1383 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1384 int buflen = r->iov.iov_len;
1386 if (buflen) {
1387 DPRINTF("Read buf_len=%d\n", buflen);
1388 r->iov.iov_len = 0;
1389 r->started = true;
1390 scsi_req_data(&r->req, buflen);
1391 return;
1394 /* This also clears the sense buffer for REQUEST SENSE. */
1395 scsi_req_complete(&r->req, GOOD);
/*
 * Validate a MODE SELECT payload for @page: the guest-supplied bytes in
 * @inbuf must differ from the current MODE SENSE values only in bits the
 * device reports as changeable.  Returns 0 if acceptable, -1 otherwise.
 */
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* Current values (page_control == 0). */
    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    /* Changeable-bits mask (page_control == 1). */
    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}
1437 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1439 switch (page) {
1440 case MODE_PAGE_CACHING:
1441 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1442 break;
1444 default:
1445 break;
/*
 * Walk the list of mode pages in a MODE SELECT parameter list.  With
 * @change false this is a dry-run validation pass; with @change true the
 * pages are actually applied.  Returns 0 on success, -1 after queuing a
 * check condition (INVALID_PARAM / INVALID_PARAM_LEN).
 */
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            /* SPF=1: 4-byte sub-page header with 16-bit length. */
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            /* SPF=0: classic 2-byte header with 8-bit length. */
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        /* Sub-pages are not supported. */
        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}
/*
 * Emulate MODE SELECT (6/10).  Validates the whole parameter list before
 * applying any page (two-pass), then flushes the backend if the write
 * cache is disabled so a WCE 1->0 transition commits pending writes.
 * Completes the request or queues a check condition.
 */
static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    /* Skip the mode parameter header and any block descriptor. */
    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
1568 static inline bool check_lba_range(SCSIDiskState *s,
1569 uint64_t sector_num, uint32_t nb_sectors)
1572 * The first line tests that no overflow happens when computing the last
1573 * sector. The second line tests that the last accessed sector is in
1574 * range.
1576 * Careful, the computations should not underflow for nb_sectors == 0,
1577 * and a 0-block read to the first LBA beyond the end of device is
1578 * valid.
1580 return (sector_num <= sector_num + nb_sectors &&
1581 sector_num + nb_sectors <= s->qdev.max_lba + 1);
/* Context threaded through the chain of asynchronous discards issued for
 * a single UNMAP command (one blk_aio_pdiscard per descriptor). */
typedef struct UnmapCBData {
    SCSIDiskReq *r;     /* the UNMAP request being serviced */
    uint8_t *inbuf;     /* next 16-byte block descriptor to process */
    int count;          /* descriptors still to process */
} UnmapCBData;
1590 static void scsi_unmap_complete(void *opaque, int ret);
/*
 * Continuation for UNMAP processing: issue the discard for the next block
 * descriptor, or finish the request when none remain or an error/invalid
 * range is hit.  Owns the reference on r->req and the UnmapCBData, both
 * released on the done path.
 */
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        /* Descriptor layout: 8-byte LBA followed by 4-byte block count. */
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}
1628 static void scsi_unmap_complete(void *opaque, int ret)
1630 UnmapCBData *data = opaque;
1631 SCSIDiskReq *r = data->r;
1633 assert(r->req.aiocb != NULL);
1634 r->req.aiocb = NULL;
1636 scsi_unmap_complete_noio(data, ret);
/*
 * Emulate UNMAP: validate the parameter list header and descriptor list
 * lengths, then kick off the asynchronous discard chain via
 * scsi_unmap_complete_noio.
 */
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    /* Parameter list: 8-byte header, then 16-byte block descriptors.
     * p[0] holds the data length, p[2] the descriptor-list length. */
    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete_noio, before data is
     * freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
/* Context for a chunked WRITE SAME emulation: the pattern buffer is
 * rewritten to the device in SCSI_WRITE_SAME_MAX-sized pieces. */
typedef struct WriteSameCBData {
    SCSIDiskReq *r;     /* the WRITE SAME request being serviced */
    int64_t sector;     /* next 512-byte sector to write */
    int nb_sectors;     /* 512-byte sectors still to write */
    QEMUIOVector qiov;  /* single-element iovec over @iov */
    struct iovec iov;   /* pattern buffer; iov_len shrinks on last chunk */
} WriteSameCBData;
/*
 * AIO completion for one WRITE SAME chunk: account the finished write,
 * advance the cursor, and either submit the next (possibly shorter)
 * chunk or complete the request and release all resources.
 */
static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
}
/*
 * Emulate WRITE SAME (10/16).  An all-zero pattern is mapped to an
 * efficient write-zeroes (optionally unmapping); otherwise the pattern is
 * replicated into a bounce buffer and written out in chunks via
 * scsi_write_same_complete.
 */
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if (buffer_is_zero(inbuf, s->qdev.blocksize)) {
        /* UNMAP bit set means the blocks may be deallocated. */
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    /* Replicate the one-block pattern across the whole chunk buffer. */
    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}
/*
 * write_data callback for emulated commands: first invocation requests
 * the guest's parameter data; the second dispatches on the opcode to the
 * command-specific handler now that the data has arrived.
 */
static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        DPRINTF("Write buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* status == -1 means no handler has completed the request yet. */
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}
/*
 * send_command handler for commands emulated entirely in the device model
 * (no direct data DMA to/from the medium).  Returns the number of bytes
 * the device will transfer to the initiator (positive), the number it
 * expects from the initiator (negative), or 0 when the request has
 * already been completed or failed with a check condition.
 */
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    /* These commands are allowed without a medium present; everything
     * else fails with NO_MEDIUM when the tray is open or empty. */
    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(!s->tray_open && blk_is_inserted(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
                                  (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            DPRINTF("SAI READ CAPACITY(16)\n");
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / 512;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        DPRINTF("Unsupported Service Action In\n");
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case UNMAP:
        DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        DPRINTF("WRITE SAME %d (len %lu)\n",
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
                (unsigned long)r->req.cmd.xfer);
        break;
    default:
        DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    /* Only raise INVALID_FIELD if no handler already set a status. */
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}
/* Execute a SCSI command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
   and zero if the command does not transfer any data.  */
/*
 * send_command handler for the DMA read/write path.  Validates the CDB
 * and LBA range, records the transfer window in 512-byte sectors, and
 * returns the expected transfer size (negative for writes, positive for
 * reads, 0 on error or empty transfer).
 */
static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
        /* Protection information (RDPROTECT etc.) is not supported. */
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (blk_is_read_only(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* Protection information (WRPROTECT etc.) is not supported. */
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * 512;
    } else {
        return r->sector_count * 512;
    }
}
2194 static void scsi_disk_reset(DeviceState *dev)
2196 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2197 uint64_t nb_sectors;
2199 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2201 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2202 nb_sectors /= s->qdev.blocksize / 512;
2203 if (nb_sectors) {
2204 nb_sectors--;
2206 s->qdev.max_lba = nb_sectors;
2207 /* reset tray statuses */
2208 s->tray_locked = 0;
2209 s->tray_open = 0;
2212 static void scsi_disk_resize_cb(void *opaque)
2214 SCSIDiskState *s = opaque;
2216 /* SPC lists this sense code as available only for
2217 * direct-access devices.
2219 if (s->qdev.type == TYPE_DISK) {
2220 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
/* Backend media-change callback for CD-ROM devices. */
static void scsi_cd_change_media_cb(void *opaque, bool load)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}
2245 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2247 SCSIDiskState *s = opaque;
2249 s->eject_request = true;
2250 if (force) {
2251 s->tray_locked = false;
2255 static bool scsi_cd_is_tray_open(void *opaque)
2257 return ((SCSIDiskState *)opaque)->tray_open;
2260 static bool scsi_cd_is_medium_locked(void *opaque)
2262 return ((SCSIDiskState *)opaque)->tray_locked;
/* Block-backend callbacks for removable (CD/DVD) devices: tray and media
 * change handling in addition to resize notification. */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};
/* Block-backend callbacks for fixed disks: only resize notification. */
static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};
2278 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2280 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2281 if (s->media_changed) {
2282 s->media_changed = false;
2283 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
/*
 * Common realize step shared by the hd/cd/legacy personalities: validate
 * the backing drive and configuration, fill in defaults (version, vendor,
 * discard granularity), and wire up the block-backend callbacks.
 * Reports failures through @errp.
 */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    Error *err = NULL;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Non-removable devices must be realized with media present. */
    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_serial(&s->qdev.conf, &s->serial);
    blkconf_blocksizes(&s->qdev.conf);
    if (dev->type == TYPE_DISK) {
        blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }
    blkconf_apply_backend_options(&dev->conf);

    /* -1 means "not set on the command line": pick a sane default. */
    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }

    /* SG pass-through backends belong to scsi-generic, not this model. */
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);
}
/* Realize for scsi-hd: set hard-disk defaults, then defer to scsi_realize. */
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        blkconf_blocksizes(&s->qdev.conf);
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}
/* Realize for scsi-cd: create an empty backend if none was given (a CD-ROM
 * may start without media), set CD defaults, then defer to scsi_realize. */
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    if (!dev->conf.blk) {
        dev->conf.blk = blk_new();
    }

    s->qdev.blocksize = 2048;  /* MMC logical block size */
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}
/* Realize for the legacy scsi-disk device: pick hard-disk or CD-ROM
 * behavior based on the legacy drive's media=cdrom setting. */
static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        /* scsi_realize is guaranteed to fail without a backend; reuse its
         * "drive property not set" error. */
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }

    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}
/* Request ops for fully emulated commands (no guest-data DMA path). */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};
/* Request ops for READ/WRITE commands that move data via the DMA helpers;
 * these requests can also be migrated (load/save_request). */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
/* Opcode-indexed dispatch table: emulated commands vs. DMA data commands.
 * NULL entries fall back to scsi_disk_emulate_reqops in scsi_new_request. */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};
/* Allocate a request for the given CDB, choosing the request ops from the
 * opcode dispatch table (unknown opcodes get the emulation ops, which will
 * report the appropriate check condition later). */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    /* Dump the full CDB, one byte per column, for debugging. */
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}
#ifdef __linux__
/* Issue a standard 36-byte INQUIRY through SG_IO to discover the real
 * peripheral device type and the removable-media bit of the host device.
 * Returns 0 on success, -1 on any ioctl/driver/host failure. */
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    uint8_t sensebuf[8];
    sg_io_hdr_t io_header;
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);  /* ALLOCATION LENGTH */

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = sizeof(buf);
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = sizeof(cmd);
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    s->qdev.type = buf[0];       /* PERIPHERAL DEVICE TYPE */
    if (buf[1] & 0x80) {         /* RMB bit */
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}
/* Realize for scsi-block: verify the backend speaks SG_IO v3+, probe the
 * device type via INQUIRY, guess a block size, and chain to scsi_realize. */
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg(errp, "cannot get SG_IO version number: %s. "
                         "Is this a SCSI device?",
                         strerror(-rc));
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size, we'll fix it when the guest sends.
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_identification(&s->qdev);
}
/* A DMA request routed through SG_IO: wraps SCSIDiskReq and keeps the
 * pieces of the guest CDB needed to rebuild per-segment CDBs. */
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
} SCSIBlockReq;
/* Submit one (possibly split) segment of a guest READ/WRITE through SG_IO.
 * Rebuilds a CDB with the segment's LBA/length patched in, preferring the
 * smallest CDB form that fits but never smaller than the guest's choice. */
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB: 21-bit LBA packed with the opcode in the first word */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    /* NOTE(review): sg timeouts are in milliseconds; UINT_MAX effectively
     * disables the timeout — confirm this is intended. */
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}
/* scsi-block passes the CDB (including any FUA bit) straight to the host
 * device, so no FUA emulation is ever needed. */
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}
2671 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2672 QEMUIOVector *iov,
2673 BlockCompletionFunc *cb, void *cb_opaque,
2674 void *opaque)
2676 SCSIBlockReq *r = opaque;
2677 return scsi_block_do_sgio(r, offset, iov,
2678 SG_DXFER_FROM_DEV, cb, cb_opaque);
2681 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2682 QEMUIOVector *iov,
2683 BlockCompletionFunc *cb, void *cb_opaque,
2684 void *opaque)
2686 SCSIBlockReq *r = opaque;
2687 return scsi_block_do_sgio(r, offset, iov,
2688 SG_DXFER_TO_DEV, cb, cb_opaque);
/* Decide whether a CDB must take the scsi-generic passthrough path (true)
 * or can use the scatter/gather DMA helpers (false). */
static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) != 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_disk_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}
2736 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2738 SCSIBlockReq *r = (SCSIBlockReq *)req;
2739 r->cmd = req->cmd.buf[0];
2740 switch (r->cmd >> 5) {
2741 case 0:
2742 /* 6-byte CDB. */
2743 r->cdb1 = r->group_number = 0;
2744 break;
2745 case 1:
2746 /* 10-byte CDB. */
2747 r->cdb1 = req->cmd.buf[1];
2748 r->group_number = req->cmd.buf[6];
2749 break;
2750 case 4:
2751 /* 12-byte CDB. */
2752 r->cdb1 = req->cmd.buf[1];
2753 r->group_number = req->cmd.buf[10];
2754 break;
2755 case 5:
2756 /* 16-byte CDB. */
2757 r->cdb1 = req->cmd.buf[1];
2758 r->group_number = req->cmd.buf[14];
2759 break;
2760 default:
2761 abort();
2764 if (r->cdb1 & 0xe0) {
2765 /* Protection information is not supported. */
2766 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2767 return 0;
2770 r->req.status = &r->io_header.status;
2771 return scsi_disk_dma_command(req, buf);
/* Request ops for scsi-block READ/WRITE: same shape as scsi_disk_dma_reqops
 * but allocates the larger SCSIBlockReq and dispatches through SG_IO. */
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
2785 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2786 uint32_t lun, uint8_t *buf,
2787 void *hba_private)
2789 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2791 if (scsi_block_is_passthrough(s, buf)) {
2792 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2793 hba_private);
2794 } else {
2795 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2796 hba_private);
/* Parse a CDB for scsi-block: passthrough commands go through the bus
 * (HBA-aware) parser, DMA-capable ones through the generic parser. */
static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf);
    }
}

#endif
2814 static
2815 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2816 BlockCompletionFunc *cb, void *cb_opaque,
2817 void *opaque)
2819 SCSIDiskReq *r = opaque;
2820 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2821 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2824 static
2825 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2826 BlockCompletionFunc *cb, void *cb_opaque,
2827 void *opaque)
2829 SCSIDiskReq *r = opaque;
2830 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2831 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
/* Class init for the abstract scsi-disk-base type: shared reset handler
 * and the default (block-layer) DMA hooks that scsi-block overrides. */
static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}
/* Abstract base type shared by scsi-hd, scsi-cd, scsi-block and scsi-disk. */
static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};
/* Properties common to every scsi-disk flavor: block backend configuration,
 * error policies and the INQUIRY identity strings. */
#define DEFINE_SCSI_DISK_PROPERTIES()                                \
    DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf),               \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),         \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),               \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),             \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),             \
    DEFINE_PROP_STRING("product", SCSIDiskState, product)
/* qdev properties for scsi-hd. */
static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};
/* Migration state shared by all scsi-disk flavors: the generic SCSI device
 * state plus the removable-media bookkeeping flags. */
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};
/* Class init for scsi-hd. */
static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_hd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    dc->props = scsi_hd_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}
/* Concrete type: virtual SCSI hard disk. */
static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};
/* qdev properties for scsi-cd (no removable/dpofua/unmap knobs: a CD-ROM
 * is always removable and read-only). */
static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};
/* Class init for scsi-cd. */
static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_cd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    dc->props = scsi_cd_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}
/* Concrete type: virtual SCSI CD-ROM. */
static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};
#ifdef __linux__
/* qdev properties for scsi-block: only the host drive; identity comes from
 * the real device via INQUIRY. */
static Property scsi_block_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_END_OF_LIST(),
};
/* Class init for scsi-block: override the DMA hooks with the SG_IO-backed
 * versions and disable FUA emulation (the host device honors FUA itself). */
static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize      = scsi_block_realize;
    sc->alloc_req    = scsi_block_new_request;
    sc->parse_cdb    = scsi_block_parse_cdb;
    sdc->dma_readv   = scsi_block_dma_readv;
    sdc->dma_writev  = scsi_block_dma_writev;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    dc->props = scsi_block_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}
/* Concrete type: passthrough to a host SCSI block device (Linux only). */
static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};
#endif
/* qdev properties for the legacy scsi-disk device (union of hd and cd). */
static Property scsi_disk_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};
/* Class init for the legacy scsi-disk device. */
static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_disk_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}
/* Concrete type: legacy scsi-disk (hd or cd, chosen at realize time). */
static const TypeInfo scsi_disk_info = {
    .name          = "scsi-disk",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_disk_class_initfn,
};
/* Register all scsi-disk QOM types with the type system. */
static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)