qemu/ar7.git: hw/scsi/scsi-disk.c
1 /*
2 * SCSI Device emulation
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
15 * This code is licensed under the LGPL.
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
22 //#define DEBUG_SCSI
24 #ifdef DEBUG_SCSI
25 #define DPRINTF(fmt, ...) \
26 do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
27 #else
28 #define DPRINTF(fmt, ...) do {} while(0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34 #include "hw/scsi/scsi.h"
35 #include "block/scsi.h"
36 #include "sysemu/sysemu.h"
37 #include "sysemu/block-backend.h"
38 #include "sysemu/blockdev.h"
39 #include "hw/block/block.h"
40 #include "sysemu/dma.h"
41 #include "qemu/cutils.h"
43 #ifdef __linux__
44 #include <scsi/sg.h>
45 #endif
47 #define SCSI_WRITE_SAME_MAX 524288
48 #define SCSI_DMA_BUF_SIZE 131072
49 #define SCSI_MAX_INQUIRY_LEN 256
50 #define SCSI_MAX_MODE_LEN 256
52 #define DEFAULT_DISCARD_GRANULARITY 4096
53 #define DEFAULT_MAX_UNMAP_SIZE (1 << 30) /* 1 GB */
54 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
56 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
58 #define SCSI_DISK_BASE(obj) \
59 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
60 #define SCSI_DISK_BASE_CLASS(klass) \
61 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
62 #define SCSI_DISK_BASE_GET_CLASS(obj) \
63 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
65 typedef struct SCSIDiskClass {
66 SCSIDeviceClass parent_class;
67 DMAIOFunc *dma_readv;
68 DMAIOFunc *dma_writev;
69 bool (*need_fua_emulation)(SCSICommand *cmd);
70 } SCSIDiskClass;
72 typedef struct SCSIDiskReq {
73 SCSIRequest req;
74 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
75 uint64_t sector;
76 uint32_t sector_count;
77 uint32_t buflen;
78 bool started;
79 bool need_fua_emulation;
80 struct iovec iov;
81 QEMUIOVector qiov;
82 BlockAcctCookie acct;
83 unsigned char *status;
84 } SCSIDiskReq;
86 #define SCSI_DISK_F_REMOVABLE 0
87 #define SCSI_DISK_F_DPOFUA 1
88 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
90 typedef struct SCSIDiskState
92 SCSIDevice qdev;
93 uint32_t features;
94 bool media_changed;
95 bool media_event;
96 bool eject_request;
97 uint16_t port_index;
98 uint64_t max_unmap_size;
99 uint64_t max_io_size;
100 QEMUBH *bh;
101 char *version;
102 char *serial;
103 char *vendor;
104 char *product;
105 bool tray_open;
106 bool tray_locked;
107 } SCSIDiskState;
109 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
111 static void scsi_free_request(SCSIRequest *req)
113 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
115 qemu_vfree(r->iov.iov_base);
118 /* Helper function for command completion with sense. */
119 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
121 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
122 r->req.tag, sense.key, sense.asc, sense.ascq);
123 scsi_req_build_sense(&r->req, sense);
124 scsi_req_complete(&r->req, CHECK_CONDITION);
127 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
129 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
131 if (!r->iov.iov_base) {
132 r->buflen = size;
133 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
135 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
136 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
139 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
143 qemu_put_be64s(f, &r->sector);
144 qemu_put_be32s(f, &r->sector_count);
145 qemu_put_be32s(f, &r->buflen);
146 if (r->buflen) {
147 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
148 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
149 } else if (!req->retry) {
150 uint32_t len = r->iov.iov_len;
151 qemu_put_be32s(f, &len);
152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
157 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
159 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
161 qemu_get_be64s(f, &r->sector);
162 qemu_get_be32s(f, &r->sector_count);
163 qemu_get_be32s(f, &r->buflen);
164 if (r->buflen) {
165 scsi_init_iovec(r, r->buflen);
166 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
167 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
168 } else if (!r->req.retry) {
169 uint32_t len;
170 qemu_get_be32s(f, &len);
171 r->iov.iov_len = len;
172 assert(r->iov.iov_len <= r->buflen);
173 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
177 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
180 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
182 if (r->req.io_canceled) {
183 scsi_req_cancel_complete(&r->req);
184 return true;
187 if (ret < 0) {
188 return scsi_handle_rw_error(r, -ret, acct_failed);
191 if (r->status && *r->status) {
192 if (acct_failed) {
193 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
194 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
196 scsi_req_complete(&r->req, *r->status);
197 return true;
200 return false;
203 static void scsi_aio_complete(void *opaque, int ret)
205 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
208 assert(r->req.aiocb != NULL);
209 r->req.aiocb = NULL;
210 if (scsi_disk_req_check_error(r, ret, true)) {
211 goto done;
214 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
215 scsi_req_complete(&r->req, GOOD);
217 done:
218 scsi_req_unref(&r->req);
221 static bool scsi_is_cmd_fua(SCSICommand *cmd)
223 switch (cmd->buf[0]) {
224 case READ_10:
225 case READ_12:
226 case READ_16:
227 case WRITE_10:
228 case WRITE_12:
229 case WRITE_16:
230 return (cmd->buf[1] & 8) != 0;
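    /* VERIFY and WRITE AND VERIFY check the data on the medium, so they are
     * treated as if the FUA bit were always set. */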
232 case VERIFY_10:
233 case VERIFY_12:
234 case VERIFY_16:
235 case WRITE_VERIFY_10:
236 case WRITE_VERIFY_12:
237 case WRITE_VERIFY_16:
238 return true;
240 case READ_6:
241 case WRITE_6:
242 default:
243 return false;
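/* Complete a write: if the command requested FUA and the device is emulating
 * it, flush the backing store before reporting GOOD status. */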
247 static void scsi_write_do_fua(SCSIDiskReq *r)
249 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
251 assert(r->req.aiocb == NULL);
252 assert(!r->req.io_canceled);
254 if (r->need_fua_emulation) {
255 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
256 BLOCK_ACCT_FLUSH);
257 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
258 return;
261 scsi_req_complete(&r->req, GOOD);
262 scsi_req_unref(&r->req);
265 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
267 assert(r->req.aiocb == NULL);
268 if (scsi_disk_req_check_error(r, ret, false)) {
269 goto done;
272 r->sector += r->sector_count;
273 r->sector_count = 0;
274 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
275 scsi_write_do_fua(r);
276 return;
277 } else {
278 scsi_req_complete(&r->req, GOOD);
281 done:
282 scsi_req_unref(&r->req);
285 static void scsi_dma_complete(void *opaque, int ret)
287 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
288 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
290 assert(r->req.aiocb != NULL);
291 r->req.aiocb = NULL;
293 if (ret < 0) {
294 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
295 } else {
296 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
298 scsi_dma_complete_noio(r, ret);
301 static void scsi_read_complete(void * opaque, int ret)
303 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
304 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
305 int n;
307 assert(r->req.aiocb != NULL);
308 r->req.aiocb = NULL;
309 if (scsi_disk_req_check_error(r, ret, true)) {
310 goto done;
313 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
314 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);
316 n = r->qiov.size / 512;
317 r->sector += n;
318 r->sector_count -= n;
319 scsi_req_data(&r->req, r->qiov.size);
321 done:
322 scsi_req_unref(&r->req);
325 /* Actually issue a read to the block device. */
326 static void scsi_do_read(SCSIDiskReq *r, int ret)
328 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
329 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
331 assert (r->req.aiocb == NULL);
332 if (scsi_disk_req_check_error(r, ret, false)) {
333 goto done;
336 /* The request is used as the AIO opaque value, so add a ref. */
337 scsi_req_ref(&r->req);
339 if (r->req.sg) {
340 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
341 r->req.resid -= r->req.sg->size;
342 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
343 r->req.sg, r->sector << BDRV_SECTOR_BITS,
344 BDRV_SECTOR_SIZE,
345 sdc->dma_readv, r, scsi_dma_complete, r,
346 DMA_DIRECTION_FROM_DEVICE);
347 } else {
348 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
349 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
350 r->qiov.size, BLOCK_ACCT_READ);
351 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
352 scsi_read_complete, r, r);
355 done:
356 scsi_req_unref(&r->req);
359 static void scsi_do_read_cb(void *opaque, int ret)
361 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
362 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
364 assert (r->req.aiocb != NULL);
365 r->req.aiocb = NULL;
367 if (ret < 0) {
368 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
369 } else {
370 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
372 scsi_do_read(opaque, ret);
375 /* Read more data from scsi device into buffer. */
376 static void scsi_read_data(SCSIRequest *req)
378 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
379 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
380 bool first;
382 DPRINTF("Read sector_count=%d\n", r->sector_count);
383 if (r->sector_count == 0) {
384 /* This also clears the sense buffer for REQUEST SENSE. */
385 scsi_req_complete(&r->req, GOOD);
386 return;
389 /* A data transfer must not already be in progress */
390 assert(r->req.aiocb == NULL);
392 /* The request is used as the AIO opaque value, so add a ref. */
393 scsi_req_ref(&r->req);
394 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
395 DPRINTF("Data transfer direction invalid\n");
396 scsi_read_complete(r, -EINVAL);
397 return;
400 if (!blk_is_available(req->dev->conf.blk)) {
401 scsi_read_complete(r, -ENOMEDIUM);
402 return;
405 first = !r->started;
406 r->started = true;
407 if (first && r->need_fua_emulation) {
408 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
409 BLOCK_ACCT_FLUSH);
410 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
411 } else {
412 scsi_do_read(r, 0);
417 * scsi_handle_rw_error has two return values. 0 means that the error
418 * must be ignored, 1 means that the error has been processed and the
419 * caller should not do anything else for this request. Note that
420 * scsi_handle_rw_error always manages its reference counts, independent
421 * of the return value.
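 * In terms of the block layer error action, BLOCK_ERROR_ACTION_IGNORE yields
 * a return value of 0, while REPORT and STOP yield 1.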
423 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
425 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
426 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
427 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
428 is_read, error);
430 if (action == BLOCK_ERROR_ACTION_REPORT) {
431 if (acct_failed) {
432 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
434 switch (error) {
435 case ENOMEDIUM:
436 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
437 break;
438 case ENOMEM:
439 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
440 break;
441 case EINVAL:
442 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
443 break;
444 case ENOSPC:
445 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
446 break;
447 default:
448 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
449 break;
452 blk_error_action(s->qdev.conf.blk, action, is_read, error);
453 if (action == BLOCK_ERROR_ACTION_STOP) {
454 scsi_req_retry(&r->req);
456 return action != BLOCK_ERROR_ACTION_IGNORE;
459 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
461 uint32_t n;
463 assert (r->req.aiocb == NULL);
464 if (scsi_disk_req_check_error(r, ret, false)) {
465 goto done;
468 n = r->qiov.size / 512;
469 r->sector += n;
470 r->sector_count -= n;
471 if (r->sector_count == 0) {
472 scsi_write_do_fua(r);
473 return;
474 } else {
475 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
476 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
477 scsi_req_data(&r->req, r->qiov.size);
480 done:
481 scsi_req_unref(&r->req);
484 static void scsi_write_complete(void * opaque, int ret)
486 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
487 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
489 assert (r->req.aiocb != NULL);
490 r->req.aiocb = NULL;
492 if (ret < 0) {
493 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
494 } else {
495 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
497 scsi_write_complete_noio(r, ret);
500 static void scsi_write_data(SCSIRequest *req)
502 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
503 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
504 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
506 /* A data transfer must not already be in progress */
507 assert(r->req.aiocb == NULL);
509 /* The request is used as the AIO opaque value, so add a ref. */
510 scsi_req_ref(&r->req);
511 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
512 DPRINTF("Data transfer direction invalid\n");
513 scsi_write_complete_noio(r, -EINVAL);
514 return;
517 if (!r->req.sg && !r->qiov.size) {
518 /* Called for the first time. Ask the driver to send us more data. */
519 r->started = true;
520 scsi_write_complete_noio(r, 0);
521 return;
523 if (!blk_is_available(req->dev->conf.blk)) {
524 scsi_write_complete_noio(r, -ENOMEDIUM);
525 return;
528 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
529 r->req.cmd.buf[0] == VERIFY_16) {
530 if (r->req.sg) {
531 scsi_dma_complete_noio(r, 0);
532 } else {
533 scsi_write_complete_noio(r, 0);
535 return;
538 if (r->req.sg) {
539 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
540 r->req.resid -= r->req.sg->size;
541 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
542 r->req.sg, r->sector << BDRV_SECTOR_BITS,
543 BDRV_SECTOR_SIZE,
544 sdc->dma_writev, r, scsi_dma_complete, r,
545 DMA_DIRECTION_TO_DEVICE);
546 } else {
547 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
548 r->qiov.size, BLOCK_ACCT_WRITE);
549 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
550 scsi_write_complete, r, r);
554 /* Return a pointer to the data buffer. */
555 static uint8_t *scsi_get_buf(SCSIRequest *req)
557 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
559 return (uint8_t *)r->iov.iov_base;
562 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
564 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
565 int buflen = 0;
566 int start;
568 if (req->cmd.buf[1] & 0x1) {
569 /* Vital product data */
570 uint8_t page_code = req->cmd.buf[2];
572 outbuf[buflen++] = s->qdev.type & 0x1f;
573 outbuf[buflen++] = page_code ; // this page
574 outbuf[buflen++] = 0x00;
575 outbuf[buflen++] = 0x00;
576 start = buflen;
578 switch (page_code) {
579 case 0x00: /* Supported page codes, mandatory */
581 DPRINTF("Inquiry EVPD[Supported pages] "
582 "buffer size %zd\n", req->cmd.xfer);
583 outbuf[buflen++] = 0x00; // list of supported pages (this page)
584 if (s->serial) {
585 outbuf[buflen++] = 0x80; // unit serial number
587 outbuf[buflen++] = 0x83; // device identification
588 if (s->qdev.type == TYPE_DISK) {
589 outbuf[buflen++] = 0xb0; // block limits
590 outbuf[buflen++] = 0xb2; // thin provisioning
592 break;
594 case 0x80: /* Device serial number, optional */
596 int l;
598 if (!s->serial) {
599 DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
600 return -1;
603 l = strlen(s->serial);
604 if (l > 36) {
605 l = 36;
608 DPRINTF("Inquiry EVPD[Serial number] "
609 "buffer size %zd\n", req->cmd.xfer);
610 memcpy(outbuf+buflen, s->serial, l);
611 buflen += l;
612 break;
615 case 0x83: /* Device identification page, mandatory */
617 const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
618 int max_len = s->serial ? 20 : 255 - 8;
619 int id_len = strlen(str);
621 if (id_len > max_len) {
622 id_len = max_len;
624 DPRINTF("Inquiry EVPD[Device identification] "
625 "buffer size %zd\n", req->cmd.xfer);
627 outbuf[buflen++] = 0x2; // ASCII
628 outbuf[buflen++] = 0; // not officially assigned
629 outbuf[buflen++] = 0; // reserved
630 outbuf[buflen++] = id_len; // length of data following
631 memcpy(outbuf+buflen, str, id_len);
632 buflen += id_len;
634 if (s->qdev.wwn) {
635 outbuf[buflen++] = 0x1; // Binary
636 outbuf[buflen++] = 0x3; // NAA
637 outbuf[buflen++] = 0; // reserved
638 outbuf[buflen++] = 8;
639 stq_be_p(&outbuf[buflen], s->qdev.wwn);
640 buflen += 8;
643 if (s->qdev.port_wwn) {
644 outbuf[buflen++] = 0x61; // SAS / Binary
645 outbuf[buflen++] = 0x93; // PIV / Target port / NAA
646 outbuf[buflen++] = 0; // reserved
647 outbuf[buflen++] = 8;
648 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
649 buflen += 8;
652 if (s->port_index) {
653 outbuf[buflen++] = 0x61; // SAS / Binary
654 outbuf[buflen++] = 0x94; // PIV / Target port / relative target port
655 outbuf[buflen++] = 0; // reserved
656 outbuf[buflen++] = 4;
657 stw_be_p(&outbuf[buflen + 2], s->port_index);
658 buflen += 4;
660 break;
662 case 0xb0: /* block limits */
664 unsigned int unmap_sectors =
665 s->qdev.conf.discard_granularity / s->qdev.blocksize;
666 unsigned int min_io_size =
667 s->qdev.conf.min_io_size / s->qdev.blocksize;
668 unsigned int opt_io_size =
669 s->qdev.conf.opt_io_size / s->qdev.blocksize;
670 unsigned int max_unmap_sectors =
671 s->max_unmap_size / s->qdev.blocksize;
672 unsigned int max_io_sectors =
673 s->max_io_size / s->qdev.blocksize;
675 if (s->qdev.type == TYPE_ROM) {
676 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
677 page_code);
678 return -1;
680 /* required VPD size with unmap support */
681 buflen = 0x40;
682 memset(outbuf + 4, 0, buflen - 4);
684 outbuf[4] = 0x1; /* wsnz */
686 /* optimal transfer length granularity */
687 outbuf[6] = (min_io_size >> 8) & 0xff;
688 outbuf[7] = min_io_size & 0xff;
690 /* maximum transfer length */
691 outbuf[8] = (max_io_sectors >> 24) & 0xff;
692 outbuf[9] = (max_io_sectors >> 16) & 0xff;
693 outbuf[10] = (max_io_sectors >> 8) & 0xff;
694 outbuf[11] = max_io_sectors & 0xff;
696 /* optimal transfer length */
697 outbuf[12] = (opt_io_size >> 24) & 0xff;
698 outbuf[13] = (opt_io_size >> 16) & 0xff;
699 outbuf[14] = (opt_io_size >> 8) & 0xff;
700 outbuf[15] = opt_io_size & 0xff;
702 /* max unmap LBA count, default is 1GB */
703 outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
704 outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
705 outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
706 outbuf[23] = max_unmap_sectors & 0xff;
708 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header. */
709 outbuf[24] = 0;
710 outbuf[25] = 0;
711 outbuf[26] = 0;
712 outbuf[27] = 255;
714 /* optimal unmap granularity */
715 outbuf[28] = (unmap_sectors >> 24) & 0xff;
716 outbuf[29] = (unmap_sectors >> 16) & 0xff;
717 outbuf[30] = (unmap_sectors >> 8) & 0xff;
718 outbuf[31] = unmap_sectors & 0xff;
720 /* max write same size */
721 outbuf[36] = 0;
722 outbuf[37] = 0;
723 outbuf[38] = 0;
724 outbuf[39] = 0;
726 outbuf[40] = (max_io_sectors >> 24) & 0xff;
727 outbuf[41] = (max_io_sectors >> 16) & 0xff;
728 outbuf[42] = (max_io_sectors >> 8) & 0xff;
729 outbuf[43] = max_io_sectors & 0xff;
730 break;
732 case 0xb2: /* thin provisioning */
734 buflen = 8;
735 outbuf[4] = 0;
736 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
737 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
738 outbuf[7] = 0;
739 break;
741 default:
742 return -1;
744 /* done with EVPD */
745 assert(buflen - start <= 255);
746 outbuf[start - 1] = buflen - start;
747 return buflen;
750 /* Standard INQUIRY data */
751 if (req->cmd.buf[2] != 0) {
752 return -1;
755 /* PAGE CODE == 0 */
756 buflen = req->cmd.xfer;
757 if (buflen > SCSI_MAX_INQUIRY_LEN) {
758 buflen = SCSI_MAX_INQUIRY_LEN;
761 outbuf[0] = s->qdev.type & 0x1f;
762 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
764 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
765 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
767 memset(&outbuf[32], 0, 4);
768 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
770 * We claim conformance to SPC-3, which is required for guests
771 * to ask for modern features like READ CAPACITY(16) or the
772 * block characteristics VPD page by default. Not all of SPC-3
773 * is actually implemented, but we're good enough.
775 outbuf[2] = 5;
776 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
778 if (buflen > 36) {
779 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
780 } else {
781 /* If the allocation length of CDB is too small,
782 the additional length is not adjusted */
783 outbuf[4] = 36 - 5;
786 /* Sync data transfer and TCQ. */
787 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
788 return buflen;
791 static inline bool media_is_dvd(SCSIDiskState *s)
793 uint64_t nb_sectors;
794 if (s->qdev.type != TYPE_ROM) {
795 return false;
797 if (!blk_is_available(s->qdev.conf.blk)) {
798 return false;
800 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
801 return nb_sectors > CD_MAX_SECTORS;
804 static inline bool media_is_cd(SCSIDiskState *s)
806 uint64_t nb_sectors;
807 if (s->qdev.type != TYPE_ROM) {
808 return false;
810 if (!blk_is_available(s->qdev.conf.blk)) {
811 return false;
813 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
814 return nb_sectors <= CD_MAX_SECTORS;
817 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
818 uint8_t *outbuf)
820 uint8_t type = r->req.cmd.buf[1] & 7;
822 if (s->qdev.type != TYPE_ROM) {
823 return -1;
826 /* Types 1/2 are only defined for Blu-Ray. */
827 if (type != 0) {
828 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
829 return -1;
832 memset(outbuf, 0, 34);
833 outbuf[1] = 32;
834 outbuf[2] = 0xe; /* last session complete, disc finalized */
835 outbuf[3] = 1; /* first track on disc */
836 outbuf[4] = 1; /* # of sessions */
837 outbuf[5] = 1; /* first track of last session */
838 outbuf[6] = 1; /* last track of last session */
839 outbuf[7] = 0x20; /* unrestricted use */
840 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
841 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
842 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
843 /* 24-31: disc bar code */
844 /* 32: disc application code */
845 /* 33: number of OPC tables */
847 return 34;
850 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
851 uint8_t *outbuf)
853 static const int rds_caps_size[5] = {
854 [0] = 2048 + 4,
855 [1] = 4 + 4,
856 [3] = 188 + 4,
857 [4] = 2048 + 4,
860 uint8_t media = r->req.cmd.buf[1];
861 uint8_t layer = r->req.cmd.buf[6];
862 uint8_t format = r->req.cmd.buf[7];
863 int size = -1;
865 if (s->qdev.type != TYPE_ROM) {
866 return -1;
868 if (media != 0) {
869 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
870 return -1;
873 if (format != 0xff) {
874 if (!blk_is_available(s->qdev.conf.blk)) {
875 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
876 return -1;
878 if (media_is_cd(s)) {
879 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
880 return -1;
882 if (format >= ARRAY_SIZE(rds_caps_size)) {
883 return -1;
885 size = rds_caps_size[format];
886 memset(outbuf, 0, size);
889 switch (format) {
890 case 0x00: {
891 /* Physical format information */
892 uint64_t nb_sectors;
893 if (layer != 0) {
894 goto fail;
896 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
898 outbuf[4] = 1; /* DVD-ROM, part version 1 */
899 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
900 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
901 outbuf[7] = 0; /* default densities */
903 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
904 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
905 break;
908 case 0x01: /* DVD copyright information, all zeros */
909 break;
911 case 0x03: /* BCA information - invalid field for no BCA info */
912 return -1;
914 case 0x04: /* DVD disc manufacturing information, all zeros */
915 break;
917 case 0xff: { /* List capabilities */
918 int i;
919 size = 4;
920 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
921 if (!rds_caps_size[i]) {
922 continue;
924 outbuf[size] = i;
925 outbuf[size + 1] = 0x40; /* Not writable, readable */
926 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
927 size += 4;
929 break;
932 default:
933 return -1;
936 /* Size of buffer, not including 2 byte size field */
937 stw_be_p(outbuf, size - 2);
938 return size;
940 fail:
941 return -1;
944 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
946 uint8_t event_code, media_status;
948 media_status = 0;
949 if (s->tray_open) {
950 media_status = MS_TRAY_OPEN;
951 } else if (blk_is_inserted(s->qdev.conf.blk)) {
952 media_status = MS_MEDIA_PRESENT;
955 /* Event notification descriptor */
956 event_code = MEC_NO_CHANGE;
957 if (media_status != MS_TRAY_OPEN) {
958 if (s->media_event) {
959 event_code = MEC_NEW_MEDIA;
960 s->media_event = false;
961 } else if (s->eject_request) {
962 event_code = MEC_EJECT_REQUESTED;
963 s->eject_request = false;
967 outbuf[0] = event_code;
968 outbuf[1] = media_status;
970 /* These fields are reserved, just clear them. */
971 outbuf[2] = 0;
972 outbuf[3] = 0;
973 return 4;
976 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
977 uint8_t *outbuf)
979 int size;
980 uint8_t *buf = r->req.cmd.buf;
981 uint8_t notification_class_request = buf[4];
982 if (s->qdev.type != TYPE_ROM) {
983 return -1;
985 if ((buf[1] & 1) == 0) {
986 /* asynchronous */
987 return -1;
990 size = 4;
991 outbuf[0] = outbuf[1] = 0;
992 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
993 if (notification_class_request & (1 << GESN_MEDIA)) {
994 outbuf[2] = GESN_MEDIA;
995 size += scsi_event_status_media(s, &outbuf[size]);
996 } else {
997 outbuf[2] = 0x80;
999 stw_be_p(outbuf, size - 4);
1000 return size;
1003 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1005 int current;
1007 if (s->qdev.type != TYPE_ROM) {
1008 return -1;
1011 if (media_is_dvd(s)) {
1012 current = MMC_PROFILE_DVD_ROM;
1013 } else if (media_is_cd(s)) {
1014 current = MMC_PROFILE_CD_ROM;
1015 } else {
1016 current = MMC_PROFILE_NONE;
1019 memset(outbuf, 0, 40);
1020 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1021 stw_be_p(&outbuf[6], current);
1022 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1023 outbuf[10] = 0x03; /* persistent, current */
1024 outbuf[11] = 8; /* two profiles */
1025 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1026 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1027 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1028 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1029 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1030 stw_be_p(&outbuf[20], 1);
1031 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1032 outbuf[23] = 8;
1033 stl_be_p(&outbuf[24], 1); /* SCSI */
1034 outbuf[28] = 1; /* DBE = 1, mandatory */
1035 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1036 stw_be_p(&outbuf[32], 3);
1037 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1038 outbuf[35] = 4;
1039 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1040 /* TODO: Random readable, CD read, DVD read, drive serial number,
1041 power management */
1042 return 40;
1045 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1047 if (s->qdev.type != TYPE_ROM) {
1048 return -1;
1050 memset(outbuf, 0, 8);
1051 outbuf[5] = 1; /* CD-ROM */
1052 return 8;
1055 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1056 int page_control)
1058 static const int mode_sense_valid[0x3f] = {
1059 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1060 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1061 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1062 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1063 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1064 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1067 uint8_t *p = *p_outbuf + 2;
1068 int length;
1070 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1071 return -1;
1075 * If Changeable Values are requested, a mask denoting those mode parameters
1076 * that are changeable shall be returned. As we currently don't support
1077 * parameter changes via MODE_SELECT, all bits are returned set to zero.
1078 * The buffer was already memset to zero by the caller of this function.
1080 * The offsets here are off by two compared to the descriptions in the
1081 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1082 * but it is done so that offsets are consistent within our implementation
1083 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1084 * 2-byte and 4-byte headers.
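 * For example, the WCE bit that the spec documents at byte 2 of the Caching
 * mode page is p[0] in the code below.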
1086 switch (page) {
1087 case MODE_PAGE_HD_GEOMETRY:
1088 length = 0x16;
1089 if (page_control == 1) { /* Changeable Values */
1090 break;
1092 /* if a geometry hint is available, use it */
1093 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1094 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1095 p[2] = s->qdev.conf.cyls & 0xff;
1096 p[3] = s->qdev.conf.heads & 0xff;
1097 /* Write precomp start cylinder, disabled */
1098 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1099 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1100 p[6] = s->qdev.conf.cyls & 0xff;
1101 /* Reduced current start cylinder, disabled */
1102 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1103 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1104 p[9] = s->qdev.conf.cyls & 0xff;
1105 /* Device step rate [ns], 200ns */
1106 p[10] = 0;
1107 p[11] = 200;
1108 /* Landing zone cylinder */
1109 p[12] = 0xff;
1110 p[13] = 0xff;
1111 p[14] = 0xff;
1112 /* Medium rotation rate [rpm], 5400 rpm */
1113 p[18] = (5400 >> 8) & 0xff;
1114 p[19] = 5400 & 0xff;
1115 break;
1117 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1118 length = 0x1e;
1119 if (page_control == 1) { /* Changeable Values */
1120 break;
1122 /* Transfer rate [kbit/s], 5Mbit/s */
1123 p[0] = 5000 >> 8;
1124 p[1] = 5000 & 0xff;
1125 /* if a geometry hint is available, use it */
1126 p[2] = s->qdev.conf.heads & 0xff;
1127 p[3] = s->qdev.conf.secs & 0xff;
1128 p[4] = s->qdev.blocksize >> 8;
1129 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1130 p[7] = s->qdev.conf.cyls & 0xff;
1131 /* Write precomp start cylinder, disabled */
1132 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1133 p[9] = s->qdev.conf.cyls & 0xff;
1134 /* Reduced current start cylinder, disabled */
1135 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1136 p[11] = s->qdev.conf.cyls & 0xff;
1137 /* Device step rate [100us], 100us */
1138 p[12] = 0;
1139 p[13] = 1;
1140 /* Device step pulse width [us], 1us */
1141 p[14] = 1;
1142 /* Device head settle delay [100us], 100us */
1143 p[15] = 0;
1144 p[16] = 1;
1145 /* Motor on delay [0.1s], 0.1s */
1146 p[17] = 1;
1147 /* Motor off delay [0.1s], 0.1s */
1148 p[18] = 1;
1149 /* Medium rotation rate [rpm], 5400 rpm */
1150 p[26] = (5400 >> 8) & 0xff;
1151 p[27] = 5400 & 0xff;
1152 break;
1154 case MODE_PAGE_CACHING:
1155 length = 0x12;
1156 if (page_control == 1 || /* Changeable Values */
1157 blk_enable_write_cache(s->qdev.conf.blk)) {
1158 p[0] = 4; /* WCE */
1160 break;
1162 case MODE_PAGE_R_W_ERROR:
1163 length = 10;
1164 if (page_control == 1) { /* Changeable Values */
1165 break;
1167 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1168 if (s->qdev.type == TYPE_ROM) {
1169 p[1] = 0x20; /* Read Retry Count */
1171 break;
1173 case MODE_PAGE_AUDIO_CTL:
1174 length = 14;
1175 break;
1177 case MODE_PAGE_CAPABILITIES:
1178 length = 0x14;
1179 if (page_control == 1) { /* Changeable Values */
1180 break;
1183 p[0] = 0x3b; /* CD-R & CD-RW read */
1184 p[1] = 0; /* Writing not supported */
1185 p[2] = 0x7f; /* Audio, composite, digital out,
1186 mode 2 form 1&2, multi session */
1187 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1188 RW corrected, C2 errors, ISRC,
1189 UPC, Bar code */
1190 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1191 /* Locking supported, jumper present, eject, tray */
1192 p[5] = 0; /* no volume & mute control, no
1193 changer */
1194 p[6] = (50 * 176) >> 8; /* 50x read speed */
1195 p[7] = (50 * 176) & 0xff;
1196 p[8] = 2 >> 8; /* Two volume levels */
1197 p[9] = 2 & 0xff;
1198 p[10] = 2048 >> 8; /* 2M buffer */
1199 p[11] = 2048 & 0xff;
1200 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1201 p[13] = (16 * 176) & 0xff;
1202 p[16] = (16 * 176) >> 8; /* 16x write speed */
1203 p[17] = (16 * 176) & 0xff;
1204 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1205 p[19] = (16 * 176) & 0xff;
1206 break;
1208 default:
1209 return -1;
1212 assert(length < 256);
1213 (*p_outbuf)[0] = page;
1214 (*p_outbuf)[1] = length;
1215 *p_outbuf += length + 2;
1216 return length + 2;
1219 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1221 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1222 uint64_t nb_sectors;
1223 bool dbd;
1224 int page, buflen, ret, page_control;
1225 uint8_t *p;
1226 uint8_t dev_specific_param;
1228 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1229 page = r->req.cmd.buf[2] & 0x3f;
1230 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1231 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
1232 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
1233 memset(outbuf, 0, r->req.cmd.xfer);
1234 p = outbuf;
1236 if (s->qdev.type == TYPE_DISK) {
1237 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1238 if (blk_is_read_only(s->qdev.conf.blk)) {
1239 dev_specific_param |= 0x80; /* Readonly. */
1241 } else {
1242 /* MMC prescribes that CD/DVD drives have no block descriptors,
1243 * and defines no device-specific parameter. */
1244 dev_specific_param = 0x00;
1245 dbd = true;
1248 if (r->req.cmd.buf[0] == MODE_SENSE) {
1249 p[1] = 0; /* Default media type. */
1250 p[2] = dev_specific_param;
1251 p[3] = 0; /* Block descriptor length. */
1252 p += 4;
1253 } else { /* MODE_SENSE_10 */
1254 p[2] = 0; /* Default media type. */
1255 p[3] = dev_specific_param;
1256 p[6] = p[7] = 0; /* Block descriptor length. */
1257 p += 8;
1260 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1261 if (!dbd && nb_sectors) {
1262 if (r->req.cmd.buf[0] == MODE_SENSE) {
1263 outbuf[3] = 8; /* Block descriptor length */
1264 } else { /* MODE_SENSE_10 */
1265 outbuf[7] = 8; /* Block descriptor length */
1267 nb_sectors /= (s->qdev.blocksize / 512);
1268 if (nb_sectors > 0xffffff) {
1269 nb_sectors = 0;
1271 p[0] = 0; /* media density code */
1272 p[1] = (nb_sectors >> 16) & 0xff;
1273 p[2] = (nb_sectors >> 8) & 0xff;
1274 p[3] = nb_sectors & 0xff;
1275 p[4] = 0; /* reserved */
1276 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1277 p[6] = s->qdev.blocksize >> 8;
1278 p[7] = 0;
1279 p += 8;
1282 if (page_control == 3) {
1283 /* Saved Values */
1284 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1285 return -1;
1288 if (page == 0x3f) {
1289 for (page = 0; page <= 0x3e; page++) {
1290 mode_sense_page(s, page, &p, page_control);
1292 } else {
1293 ret = mode_sense_page(s, page, &p, page_control);
1294 if (ret == -1) {
1295 return -1;
1299 buflen = p - outbuf;
1301 * The mode data length field specifies the length in bytes of the
1302 * following data that is available to be transferred. The mode data
1303 * length does not include itself.
1305 if (r->req.cmd.buf[0] == MODE_SENSE) {
1306 outbuf[0] = buflen - 1;
1307 } else { /* MODE_SENSE_10 */
1308 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1309 outbuf[1] = (buflen - 2) & 0xff;
1311 return buflen;
1314 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1316 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1317 int start_track, format, msf, toclen;
1318 uint64_t nb_sectors;
1320 msf = req->cmd.buf[1] & 2;
1321 format = req->cmd.buf[2] & 0xf;
1322 start_track = req->cmd.buf[6];
1323 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1324 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
1325 nb_sectors /= s->qdev.blocksize / 512;
1326 switch (format) {
1327 case 0:
1328 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1329 break;
1330 case 1:
1331 /* multi session : only a single session defined */
1332 toclen = 12;
1333 memset(outbuf, 0, 12);
1334 outbuf[1] = 0x0a;
1335 outbuf[2] = 0x01;
1336 outbuf[3] = 0x01;
1337 break;
1338 case 2:
1339 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1340 break;
1341 default:
1342 return -1;
1344 return toclen;
1347 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1349 SCSIRequest *req = &r->req;
1350 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1351 bool start = req->cmd.buf[4] & 1;
1352 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1353 int pwrcnd = req->cmd.buf[4] & 0xf0;
1355 if (pwrcnd) {
1356 /* eject/load only happens for power condition == 0 */
1357 return 0;
1360 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1361 if (!start && !s->tray_open && s->tray_locked) {
1362 scsi_check_condition(r,
1363 blk_is_inserted(s->qdev.conf.blk)
1364 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1365 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1366 return -1;
1369 if (s->tray_open != !start) {
1370 blk_eject(s->qdev.conf.blk, !start);
1371 s->tray_open = !start;
1374 return 0;
1377 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1379 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1380 int buflen = r->iov.iov_len;
1382 if (buflen) {
1383 DPRINTF("Read buf_len=%d\n", buflen);
1384 r->iov.iov_len = 0;
1385 r->started = true;
1386 scsi_req_data(&r->req, buflen);
1387 return;
1390 /* This also clears the sense buffer for REQUEST SENSE. */
1391 scsi_req_complete(&r->req, GOOD);
1394 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1395 uint8_t *inbuf, int inlen)
1397 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1398 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1399 uint8_t *p;
1400 int len, expected_len, changeable_len, i;
1402 /* The input buffer does not include the page header, so it is
1403 * off by 2 bytes.
1405 expected_len = inlen + 2;
1406 if (expected_len > SCSI_MAX_MODE_LEN) {
1407 return -1;
1410 p = mode_current;
1411 memset(mode_current, 0, inlen + 2);
1412 len = mode_sense_page(s, page, &p, 0);
1413 if (len < 0 || len != expected_len) {
1414 return -1;
1417 p = mode_changeable;
1418 memset(mode_changeable, 0, inlen + 2);
1419 changeable_len = mode_sense_page(s, page, &p, 1);
1420 assert(changeable_len == len);
1422 /* Check that unchangeable bits are the same as what MODE SENSE
1423 * would return.
1425 for (i = 2; i < len; i++) {
1426 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1427 return -1;
1430 return 0;
1433 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1435 switch (page) {
1436 case MODE_PAGE_CACHING:
1437 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1438 break;
1440 default:
1441 break;
1445 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1447 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1449 while (len > 0) {
1450 int page, subpage, page_len;
1452 /* Parse both possible formats for the mode page headers. */
1453 page = p[0] & 0x3f;
1454 if (p[0] & 0x40) {
1455 if (len < 4) {
1456 goto invalid_param_len;
1458 subpage = p[1];
1459 page_len = lduw_be_p(&p[2]);
1460 p += 4;
1461 len -= 4;
1462 } else {
1463 if (len < 2) {
1464 goto invalid_param_len;
1466 subpage = 0;
1467 page_len = p[1];
1468 p += 2;
1469 len -= 2;
1472 if (subpage) {
1473 goto invalid_param;
1475 if (page_len > len) {
1476 goto invalid_param_len;
1479 if (!change) {
1480 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1481 goto invalid_param;
1483 } else {
1484 scsi_disk_apply_mode_select(s, page, p);
1487 p += page_len;
1488 len -= page_len;
1490 return 0;
1492 invalid_param:
1493 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1494 return -1;
1496 invalid_param_len:
1497 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1498 return -1;
1501 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1503 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1504 uint8_t *p = inbuf;
1505 int cmd = r->req.cmd.buf[0];
1506 int len = r->req.cmd.xfer;
1507 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1508 int bd_len;
1509 int pass;
1511 /* We only support PF=1, SP=0. */
1512 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1513 goto invalid_field;
1516 if (len < hdr_len) {
1517 goto invalid_param_len;
1520 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1521 len -= hdr_len;
1522 p += hdr_len;
1523 if (len < bd_len) {
1524 goto invalid_param_len;
1526 if (bd_len != 0 && bd_len != 8) {
1527 goto invalid_param;
1530 len -= bd_len;
1531 p += bd_len;
1533 /* Ensure no change is made if there is an error! */
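    /* The first pass only validates the pages against the changeable-bits mask
     * reported by MODE SENSE; the second pass actually applies them. */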
1534 for (pass = 0; pass < 2; pass++) {
1535 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1536 assert(pass == 0);
1537 return;
1540 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1541 /* The request is used as the AIO opaque value, so add a ref. */
1542 scsi_req_ref(&r->req);
1543 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1544 BLOCK_ACCT_FLUSH);
1545 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1546 return;
1549 scsi_req_complete(&r->req, GOOD);
1550 return;
1552 invalid_param:
1553 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1554 return;
1556 invalid_param_len:
1557 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1558 return;
1560 invalid_field:
1561 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1564 static inline bool check_lba_range(SCSIDiskState *s,
1565 uint64_t sector_num, uint32_t nb_sectors)
1568 * The first line tests that no overflow happens when computing the last
1569 * sector. The second line tests that the last accessed sector is in
1570 * range.
1572 * Careful, the computations should not underflow for nb_sectors == 0,
1573 * and a 0-block read to the first LBA beyond the end of device is
1574 * valid.
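 * For example, with max_lba == 99 a zero-block request at sector 100 is
 * accepted, while a two-block request starting at sector 99 is rejected.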
1576 return (sector_num <= sector_num + nb_sectors &&
1577 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1580 typedef struct UnmapCBData {
1581 SCSIDiskReq *r;
1582 uint8_t *inbuf;
1583 int count;
1584 } UnmapCBData;
1586 static void scsi_unmap_complete(void *opaque, int ret);
1588 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1590 SCSIDiskReq *r = data->r;
1591 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1592 uint64_t sector_num;
1593 uint32_t nb_sectors;
1595 assert(r->req.aiocb == NULL);
1596 if (scsi_disk_req_check_error(r, ret, false)) {
1597 goto done;
1600 if (data->count > 0) {
1601 sector_num = ldq_be_p(&data->inbuf[0]);
1602 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1603 if (!check_lba_range(s, sector_num, nb_sectors)) {
1604 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1605 goto done;
1608 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1609 sector_num * s->qdev.blocksize,
1610 nb_sectors * s->qdev.blocksize,
1611 scsi_unmap_complete, data);
1612 data->count--;
1613 data->inbuf += 16;
1614 return;
1617 scsi_req_complete(&r->req, GOOD);
1619 done:
1620 scsi_req_unref(&r->req);
1621 g_free(data);
1624 static void scsi_unmap_complete(void *opaque, int ret)
1626 UnmapCBData *data = opaque;
1627 SCSIDiskReq *r = data->r;
1629 assert(r->req.aiocb != NULL);
1630 r->req.aiocb = NULL;
1632 scsi_unmap_complete_noio(data, ret);
1635 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1637 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1638 uint8_t *p = inbuf;
1639 int len = r->req.cmd.xfer;
1640 UnmapCBData *data;
1642 /* Reject ANCHOR=1. */
1643 if (r->req.cmd.buf[1] & 0x1) {
1644 goto invalid_field;
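    /* UNMAP parameter list layout: bytes 0-1 hold the parameter data length,
     * bytes 2-3 the block descriptor data length, and the 16-byte descriptors
     * (8-byte LBA, 4-byte block count, 4 reserved bytes) start at byte 8. */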
1647 if (len < 8) {
1648 goto invalid_param_len;
1650 if (len < lduw_be_p(&p[0]) + 2) {
1651 goto invalid_param_len;
1653 if (len < lduw_be_p(&p[2]) + 8) {
1654 goto invalid_param_len;
1656 if (lduw_be_p(&p[2]) & 15) {
1657 goto invalid_param_len;
1660 if (blk_is_read_only(s->qdev.conf.blk)) {
1661 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1662 return;
1665 data = g_new0(UnmapCBData, 1);
1666 data->r = r;
1667 data->inbuf = &p[8];
1668 data->count = lduw_be_p(&p[2]) >> 4;
1670 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1671 scsi_req_ref(&r->req);
1672 scsi_unmap_complete_noio(data, 0);
1673 return;
1675 invalid_param_len:
1676 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1677 return;
1679 invalid_field:
1680 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1683 typedef struct WriteSameCBData {
1684 SCSIDiskReq *r;
1685 int64_t sector;
1686 int nb_sectors;
1687 QEMUIOVector qiov;
1688 struct iovec iov;
1689 } WriteSameCBData;
1691 static void scsi_write_same_complete(void *opaque, int ret)
1693 WriteSameCBData *data = opaque;
1694 SCSIDiskReq *r = data->r;
1695 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1697 assert(r->req.aiocb != NULL);
1698 r->req.aiocb = NULL;
1699 if (scsi_disk_req_check_error(r, ret, true)) {
1700 goto done;
1703 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1705 data->nb_sectors -= data->iov.iov_len / 512;
1706 data->sector += data->iov.iov_len / 512;
1707 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1708 if (data->iov.iov_len) {
1709 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1710 data->iov.iov_len, BLOCK_ACCT_WRITE);
1711 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1712 * where final qiov may need smaller size */
1713 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1714 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1715 data->sector << BDRV_SECTOR_BITS,
1716 &data->qiov, 0,
1717 scsi_write_same_complete, data);
1718 return;
1721 scsi_req_complete(&r->req, GOOD);
1723 done:
1724 scsi_req_unref(&r->req);
1725 qemu_vfree(data->iov.iov_base);
1726 g_free(data);
1729 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1731 SCSIRequest *req = &r->req;
1732 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1733 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1734 WriteSameCBData *data;
1735 uint8_t *buf;
1736 int i;
1738 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1739 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1740 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1741 return;
1744 if (blk_is_read_only(s->qdev.conf.blk)) {
1745 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1746 return;
1748 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1749 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1750 return;
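    /* An all-zero pattern maps to an efficient write-zeroes operation; the
     * UNMAP bit in the CDB additionally allows the blocks to be discarded. */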
1753 if (buffer_is_zero(inbuf, s->qdev.blocksize)) {
1754 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1756 /* The request is used as the AIO opaque value, so add a ref. */
1757 scsi_req_ref(&r->req);
1758 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1759 nb_sectors * s->qdev.blocksize,
1760 BLOCK_ACCT_WRITE);
1761 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1762 r->req.cmd.lba * s->qdev.blocksize,
1763 nb_sectors * s->qdev.blocksize,
1764 flags, scsi_aio_complete, r);
1765 return;
1768 data = g_new0(WriteSameCBData, 1);
1769 data->r = r;
1770 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1771 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1772 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1773 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1774 data->iov.iov_len);
1775 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1777 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1778 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1781 scsi_req_ref(&r->req);
1782 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1783 data->iov.iov_len, BLOCK_ACCT_WRITE);
1784 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1785 data->sector << BDRV_SECTOR_BITS,
1786 &data->qiov, 0,
1787 scsi_write_same_complete, data);
1790 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1792 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1794 if (r->iov.iov_len) {
1795 int buflen = r->iov.iov_len;
1796 DPRINTF("Write buf_len=%d\n", buflen);
1797 r->iov.iov_len = 0;
1798 scsi_req_data(&r->req, buflen);
1799 return;
1802 switch (req->cmd.buf[0]) {
1803 case MODE_SELECT:
1804 case MODE_SELECT_10:
1805 /* This also clears the sense buffer for REQUEST SENSE. */
1806 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1807 break;
1809 case UNMAP:
1810 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1811 break;
1813 case VERIFY_10:
1814 case VERIFY_12:
1815 case VERIFY_16:
1816 if (r->req.status == -1) {
1817 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1819 break;
1821 case WRITE_SAME_10:
1822 case WRITE_SAME_16:
1823 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1824 break;
1826 default:
1827 abort();
1831 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1833 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1834 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1835 uint64_t nb_sectors;
1836 uint8_t *outbuf;
1837 int buflen;
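    /* The commands listed below are handled even without a medium inserted;
     * everything else fails immediately with NO MEDIUM. */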
1839 switch (req->cmd.buf[0]) {
1840 case INQUIRY:
1841 case MODE_SENSE:
1842 case MODE_SENSE_10:
1843 case RESERVE:
1844 case RESERVE_10:
1845 case RELEASE:
1846 case RELEASE_10:
1847 case START_STOP:
1848 case ALLOW_MEDIUM_REMOVAL:
1849 case GET_CONFIGURATION:
1850 case GET_EVENT_STATUS_NOTIFICATION:
1851 case MECHANISM_STATUS:
1852 case REQUEST_SENSE:
1853 break;
1855 default:
1856 if (!blk_is_available(s->qdev.conf.blk)) {
1857 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1858 return 0;
1860 break;
1864 * FIXME: we shouldn't return anything bigger than 4k, but the code
1865 * requires the buffer to be as big as req->cmd.xfer in several
1866 * places. So, do not allow CDBs with a very large ALLOCATION
1867 * LENGTH. The real fix would be to modify scsi_read_data and
1868 * dma_buf_read, so that they return data beyond the buflen
1869 * as all zeros.
1871 if (req->cmd.xfer > 65536) {
1872 goto illegal_request;
1874 r->buflen = MAX(4096, req->cmd.xfer);
1876 if (!r->iov.iov_base) {
1877 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1880 buflen = req->cmd.xfer;
1881 outbuf = r->iov.iov_base;
1882 memset(outbuf, 0, r->buflen);
1883 switch (req->cmd.buf[0]) {
1884 case TEST_UNIT_READY:
1885 assert(blk_is_available(s->qdev.conf.blk));
1886 break;
1887 case INQUIRY:
1888 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1889 if (buflen < 0) {
1890 goto illegal_request;
1892 break;
1893 case MODE_SENSE:
1894 case MODE_SENSE_10:
1895 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1896 if (buflen < 0) {
1897 goto illegal_request;
1899 break;
1900 case READ_TOC:
1901 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1902 if (buflen < 0) {
1903 goto illegal_request;
1905 break;
1906 case RESERVE:
1907 if (req->cmd.buf[1] & 1) {
1908 goto illegal_request;
1910 break;
1911 case RESERVE_10:
1912 if (req->cmd.buf[1] & 3) {
1913 goto illegal_request;
1915 break;
1916 case RELEASE:
1917 if (req->cmd.buf[1] & 1) {
1918 goto illegal_request;
1920 break;
1921 case RELEASE_10:
1922 if (req->cmd.buf[1] & 3) {
1923 goto illegal_request;
1925 break;
1926 case START_STOP:
1927 if (scsi_disk_emulate_start_stop(r) < 0) {
1928 return 0;
1930 break;
1931 case ALLOW_MEDIUM_REMOVAL:
1932 s->tray_locked = req->cmd.buf[4] & 1;
1933 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1934 break;
1935 case READ_CAPACITY_10:
1936 /* The normal LEN field for this command is zero. */
1937 memset(outbuf, 0, 8);
1938 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1939 if (!nb_sectors) {
1940 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1941 return 0;
1943 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1944 goto illegal_request;
1946 nb_sectors /= s->qdev.blocksize / 512;
1947 /* Returned value is the address of the last sector. */
1948 nb_sectors--;
1949 /* Remember the new size for read/write sanity checking. */
1950 s->qdev.max_lba = nb_sectors;
1951 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1952 if (nb_sectors > UINT32_MAX) {
1953 nb_sectors = UINT32_MAX;
1955 outbuf[0] = (nb_sectors >> 24) & 0xff;
1956 outbuf[1] = (nb_sectors >> 16) & 0xff;
1957 outbuf[2] = (nb_sectors >> 8) & 0xff;
1958 outbuf[3] = nb_sectors & 0xff;
1959 outbuf[4] = 0;
1960 outbuf[5] = 0;
1961 outbuf[6] = s->qdev.blocksize >> 8;
1962 outbuf[7] = 0;
1963 break;
1964 case REQUEST_SENSE:
1965 /* Just return "NO SENSE". */
1966 buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
1967 (req->cmd.buf[1] & 1) == 0);
1968 if (buflen < 0) {
1969 goto illegal_request;
1971 break;
1972 case MECHANISM_STATUS:
1973 buflen = scsi_emulate_mechanism_status(s, outbuf);
1974 if (buflen < 0) {
1975 goto illegal_request;
1977 break;
1978 case GET_CONFIGURATION:
1979 buflen = scsi_get_configuration(s, outbuf);
1980 if (buflen < 0) {
1981 goto illegal_request;
1983 break;
1984 case GET_EVENT_STATUS_NOTIFICATION:
1985 buflen = scsi_get_event_status_notification(s, r, outbuf);
1986 if (buflen < 0) {
1987 goto illegal_request;
1989 break;
1990 case READ_DISC_INFORMATION:
1991 buflen = scsi_read_disc_information(s, r, outbuf);
1992 if (buflen < 0) {
1993 goto illegal_request;
1995 break;
1996 case READ_DVD_STRUCTURE:
1997 buflen = scsi_read_dvd_structure(s, r, outbuf);
1998 if (buflen < 0) {
1999 goto illegal_request;
2001 break;
2002 case SERVICE_ACTION_IN_16:
2003 /* Service Action In subcommands. */
2004 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2005 DPRINTF("SAI READ CAPACITY(16)\n");
2006 memset(outbuf, 0, req->cmd.xfer);
2007 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2008 if (!nb_sectors) {
2009 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2010 return 0;
2012 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2013 goto illegal_request;
2015 nb_sectors /= s->qdev.blocksize / 512;
2016 /* Returned value is the address of the last sector. */
2017 nb_sectors--;
2018 /* Remember the new size for read/write sanity checking. */
2019 s->qdev.max_lba = nb_sectors;
2020 outbuf[0] = (nb_sectors >> 56) & 0xff;
2021 outbuf[1] = (nb_sectors >> 48) & 0xff;
2022 outbuf[2] = (nb_sectors >> 40) & 0xff;
2023 outbuf[3] = (nb_sectors >> 32) & 0xff;
2024 outbuf[4] = (nb_sectors >> 24) & 0xff;
2025 outbuf[5] = (nb_sectors >> 16) & 0xff;
2026 outbuf[6] = (nb_sectors >> 8) & 0xff;
2027 outbuf[7] = nb_sectors & 0xff;
2028 outbuf[8] = 0;
2029 outbuf[9] = 0;
2030 outbuf[10] = s->qdev.blocksize >> 8;
2031 outbuf[11] = 0;
2032 outbuf[12] = 0;
2033 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2035 /* set TPE bit if the format supports discard */
2036 if (s->qdev.conf.discard_granularity) {
2037 outbuf[14] = 0x80;
2040 /* Protection, exponent and lowest lba field left blank. */
2041 break;
2043 DPRINTF("Unsupported Service Action In\n");
2044 goto illegal_request;
2045 case SYNCHRONIZE_CACHE:
2046 /* The request is used as the AIO opaque value, so add a ref. */
2047 scsi_req_ref(&r->req);
2048 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2049 BLOCK_ACCT_FLUSH);
2050 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2051 return 0;
2052 case SEEK_10:
2053 DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
2054 if (r->req.cmd.lba > s->qdev.max_lba) {
2055 goto illegal_lba;
2057 break;
2058 case MODE_SELECT:
2059 DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2060 break;
2061 case MODE_SELECT_10:
2062 DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2063 break;
2064 case UNMAP:
2065 DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2066 break;
2067 case VERIFY_10:
2068 case VERIFY_12:
2069 case VERIFY_16:
2070 DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
2071 if (req->cmd.buf[1] & 6) {
2072 goto illegal_request;
2074 break;
2075 case WRITE_SAME_10:
2076 case WRITE_SAME_16:
2077 DPRINTF("WRITE SAME %d (len %lu)\n",
2078 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
2079 (unsigned long)r->req.cmd.xfer);
2080 break;
2081 default:
2082 DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
2083 scsi_command_name(buf[0]));
2084 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2085 return 0;
2086 }
2087 assert(!r->req.aiocb);
2088 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2089 if (r->iov.iov_len == 0) {
2090 scsi_req_complete(&r->req, GOOD);
2091 }
2092 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2093 assert(r->iov.iov_len == req->cmd.xfer);
2094 return -r->iov.iov_len;
2095 } else {
2096 return r->iov.iov_len;
2097 }
2099 illegal_request:
2100 if (r->req.status == -1) {
2101 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2102 }
2103 return 0;
2105 illegal_lba:
2106 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2107 return 0;
2108 }
2110 /* Execute a SCSI command. Returns the length of the data expected by the
2111 command. This will be positive for data transfers from the device
2112 (eg. disk reads), negative for transfers to the device (eg. disk writes),
2113 and zero if the command does not transfer any data. */
2115 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2116 {
2117 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2118 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2119 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2120 uint32_t len;
2121 uint8_t command;
2123 command = buf[0];
2125 if (!blk_is_available(s->qdev.conf.blk)) {
2126 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2127 return 0;
2130 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2131 switch (command) {
2132 case READ_6:
2133 case READ_10:
2134 case READ_12:
2135 case READ_16:
2136 DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
2137 if (r->req.cmd.buf[1] & 0xe0) {
2138 goto illegal_request;
2139 }
2140 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2141 goto illegal_lba;
2142 }
2143 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2144 r->sector_count = len * (s->qdev.blocksize / 512);
2145 break;
2146 case WRITE_6:
2147 case WRITE_10:
2148 case WRITE_12:
2149 case WRITE_16:
2150 case WRITE_VERIFY_10:
2151 case WRITE_VERIFY_12:
2152 case WRITE_VERIFY_16:
2153 if (blk_is_read_only(s->qdev.conf.blk)) {
2154 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2155 return 0;
2156 }
2157 DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
2158 (command & 0xe) == 0xe ? "And Verify " : "",
2159 r->req.cmd.lba, len);
2160 /* fall through */
2161 case VERIFY_10:
2162 case VERIFY_12:
2163 case VERIFY_16:
2164 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2165 * As far as DMA is concerned, we can treat it the same as a write;
2166 * scsi_block_do_sgio will send VERIFY commands.
2167 */
2168 if (r->req.cmd.buf[1] & 0xe0) {
2169 goto illegal_request;
2170 }
2171 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2172 goto illegal_lba;
2173 }
2174 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2175 r->sector_count = len * (s->qdev.blocksize / 512);
2176 break;
2177 default:
2178 abort();
2179 illegal_request:
2180 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2181 return 0;
2182 illegal_lba:
2183 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2184 return 0;
2185 }
2186 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2187 if (r->sector_count == 0) {
2188 scsi_req_complete(&r->req, GOOD);
2189 }
2190 assert(r->iov.iov_len == 0);
2191 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2192 return -r->sector_count * 512;
2193 } else {
2194 return r->sector_count * 512;
2195 }
2196 }
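/* On reset, purge all outstanding requests with RESET sense, recompute
 * max_lba from the current backend size, and return the tray to its
 * default closed and unlocked state.
 */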
2198 static void scsi_disk_reset(DeviceState *dev)
2199 {
2200 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2201 uint64_t nb_sectors;
2203 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2205 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2206 nb_sectors /= s->qdev.blocksize / 512;
2207 if (nb_sectors) {
2208 nb_sectors--;
2209 }
2210 s->qdev.max_lba = nb_sectors;
2211 /* reset tray statuses */
2212 s->tray_locked = 0;
2213 s->tray_open = 0;
2214 }
2216 static void scsi_disk_resize_cb(void *opaque)
2218 SCSIDiskState *s = opaque;
2220 /* SPC lists this sense code as available only for
2221 * direct-access devices.
2222 */
2223 if (s->qdev.type == TYPE_DISK) {
2224 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2228 static void scsi_cd_change_media_cb(void *opaque, bool load)
2230 SCSIDiskState *s = opaque;
2232 /*
2233 * When a CD gets changed, we have to report an ejected state and
2234 * then a loaded state to guests so that they detect tray
2235 * open/close and media change events. Guests that do not use
2236 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2237 * states rely on this behavior.
2239 * media_changed governs the state machine used for unit attention
2240 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2241 */
2242 s->media_changed = load;
2243 s->tray_open = !load;
2244 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2245 s->media_event = true;
2246 s->eject_request = false;
2249 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2251 SCSIDiskState *s = opaque;
2253 s->eject_request = true;
2254 if (force) {
2255 s->tray_locked = false;
2259 static bool scsi_cd_is_tray_open(void *opaque)
2261 return ((SCSIDiskState *)opaque)->tray_open;
2264 static bool scsi_cd_is_medium_locked(void *opaque)
2266 return ((SCSIDiskState *)opaque)->tray_locked;
2269 static const BlockDevOps scsi_disk_removable_block_ops = {
2270 .change_media_cb = scsi_cd_change_media_cb,
2271 .eject_request_cb = scsi_cd_eject_request_cb,
2272 .is_tray_open = scsi_cd_is_tray_open,
2273 .is_medium_locked = scsi_cd_is_medium_locked,
2275 .resize_cb = scsi_disk_resize_cb,
2278 static const BlockDevOps scsi_disk_block_ops = {
2279 .resize_cb = scsi_disk_resize_cb,
2282 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2284 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2285 if (s->media_changed) {
2286 s->media_changed = false;
2287 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2291 static void scsi_realize(SCSIDevice *dev, Error **errp)
2293 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2294 Error *err = NULL;
2296 if (!s->qdev.conf.blk) {
2297 error_setg(errp, "drive property not set");
2298 return;
2301 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2302 !blk_is_inserted(s->qdev.conf.blk)) {
2303 error_setg(errp, "Device needs media, but drive is empty");
2304 return;
2307 blkconf_serial(&s->qdev.conf, &s->serial);
2308 blkconf_blocksizes(&s->qdev.conf);
2309 if (dev->type == TYPE_DISK) {
2310 blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
2311 if (err) {
2312 error_propagate(errp, err);
2313 return;
2316 blkconf_apply_backend_options(&dev->conf);
2318 if (s->qdev.conf.discard_granularity == -1) {
2319 s->qdev.conf.discard_granularity =
2320 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2323 if (!s->version) {
2324 s->version = g_strdup(qemu_hw_version());
2326 if (!s->vendor) {
2327 s->vendor = g_strdup("QEMU");
2330 if (blk_is_sg(s->qdev.conf.blk)) {
2331 error_setg(errp, "unwanted /dev/sg*");
2332 return;
2335 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2336 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2337 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2338 } else {
2339 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2341 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2343 blk_iostatus_enable(s->qdev.conf.blk);
2346 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2348 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2349 /* This can happen for devices without a drive. The error message for the
2350 * missing backend will be issued in scsi_realize.
2351 */
2352 if (s->qdev.conf.blk) {
2353 blkconf_blocksizes(&s->qdev.conf);
2355 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2356 s->qdev.type = TYPE_DISK;
2357 if (!s->product) {
2358 s->product = g_strdup("QEMU HARDDISK");
2360 scsi_realize(&s->qdev, errp);
2363 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2365 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2367 if (!dev->conf.blk) {
2368 dev->conf.blk = blk_new();
2371 s->qdev.blocksize = 2048;
2372 s->qdev.type = TYPE_ROM;
2373 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2374 if (!s->product) {
2375 s->product = g_strdup("QEMU CD-ROM");
2377 scsi_realize(&s->qdev, errp);
2380 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2382 DriveInfo *dinfo;
2383 Error *local_err = NULL;
2385 if (!dev->conf.blk) {
2386 scsi_realize(dev, &local_err);
2387 assert(local_err);
2388 error_propagate(errp, local_err);
2389 return;
2392 dinfo = blk_legacy_dinfo(dev->conf.blk);
2393 if (dinfo && dinfo->media_cd) {
2394 scsi_cd_realize(dev, errp);
2395 } else {
2396 scsi_hd_realize(dev, errp);
2400 static const SCSIReqOps scsi_disk_emulate_reqops = {
2401 .size = sizeof(SCSIDiskReq),
2402 .free_req = scsi_free_request,
2403 .send_command = scsi_disk_emulate_command,
2404 .read_data = scsi_disk_emulate_read_data,
2405 .write_data = scsi_disk_emulate_write_data,
2406 .get_buf = scsi_get_buf,
2409 static const SCSIReqOps scsi_disk_dma_reqops = {
2410 .size = sizeof(SCSIDiskReq),
2411 .free_req = scsi_free_request,
2412 .send_command = scsi_disk_dma_command,
2413 .read_data = scsi_read_data,
2414 .write_data = scsi_write_data,
2415 .get_buf = scsi_get_buf,
2416 .load_request = scsi_disk_load_request,
2417 .save_request = scsi_disk_save_request,
2418 };
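/* Opcode dispatch table: commands that are emulated entirely in this file
 * use scsi_disk_emulate_reqops, while commands that move guest data through
 * the block layer use scsi_disk_dma_reqops. Opcodes missing from the table
 * fall back to the emulation ops in scsi_new_request() below.
 */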
2420 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2421 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2422 [INQUIRY] = &scsi_disk_emulate_reqops,
2423 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2424 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2425 [START_STOP] = &scsi_disk_emulate_reqops,
2426 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2427 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2428 [READ_TOC] = &scsi_disk_emulate_reqops,
2429 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2430 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2431 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2432 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2433 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2434 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2435 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2436 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2437 [SEEK_10] = &scsi_disk_emulate_reqops,
2438 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2439 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2440 [UNMAP] = &scsi_disk_emulate_reqops,
2441 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2442 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2443 [VERIFY_10] = &scsi_disk_emulate_reqops,
2444 [VERIFY_12] = &scsi_disk_emulate_reqops,
2445 [VERIFY_16] = &scsi_disk_emulate_reqops,
2447 [READ_6] = &scsi_disk_dma_reqops,
2448 [READ_10] = &scsi_disk_dma_reqops,
2449 [READ_12] = &scsi_disk_dma_reqops,
2450 [READ_16] = &scsi_disk_dma_reqops,
2451 [WRITE_6] = &scsi_disk_dma_reqops,
2452 [WRITE_10] = &scsi_disk_dma_reqops,
2453 [WRITE_12] = &scsi_disk_dma_reqops,
2454 [WRITE_16] = &scsi_disk_dma_reqops,
2455 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2456 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2457 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
2458 };
2460 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2461 uint8_t *buf, void *hba_private)
2463 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2464 SCSIRequest *req;
2465 const SCSIReqOps *ops;
2466 uint8_t command;
2468 command = buf[0];
2469 ops = scsi_disk_reqops_dispatch[command];
2470 if (!ops) {
2471 ops = &scsi_disk_emulate_reqops;
2472 }
2473 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2475 #ifdef DEBUG_SCSI
2476 DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
2478 int i;
2479 for (i = 1; i < scsi_cdb_length(buf); i++) {
2480 printf(" 0x%02x", buf[i]);
2482 printf("\n");
2484 #endif
2486 return req;
2489 #ifdef __linux__
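/* Probe the backing device with a standard 36-byte INQUIRY via SG_IO to
 * learn its peripheral device type and whether the medium is removable
 * (RMB bit in byte 1 of the INQUIRY data).
 */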
2490 static int get_device_type(SCSIDiskState *s)
2491 {
2492 uint8_t cmd[16];
2493 uint8_t buf[36];
2494 uint8_t sensebuf[8];
2495 sg_io_hdr_t io_header;
2496 int ret;
2498 memset(cmd, 0, sizeof(cmd));
2499 memset(buf, 0, sizeof(buf));
2500 cmd[0] = INQUIRY;
2501 cmd[4] = sizeof(buf);
2503 memset(&io_header, 0, sizeof(io_header));
2504 io_header.interface_id = 'S';
2505 io_header.dxfer_direction = SG_DXFER_FROM_DEV;
2506 io_header.dxfer_len = sizeof(buf);
2507 io_header.dxferp = buf;
2508 io_header.cmdp = cmd;
2509 io_header.cmd_len = sizeof(cmd);
2510 io_header.mx_sb_len = sizeof(sensebuf);
2511 io_header.sbp = sensebuf;
2512 io_header.timeout = 6000; /* XXX */
2514 ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
2515 if (ret < 0 || io_header.driver_status || io_header.host_status) {
2516 return -1;
2517 }
2518 s->qdev.type = buf[0];
2519 if (buf[1] & 0x80) {
2520 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2521 }
2522 return 0;
2523 }
2525 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2527 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2528 int sg_version;
2529 int rc;
2531 if (!s->qdev.conf.blk) {
2532 error_setg(errp, "drive property not set");
2533 return;
2536 /* check we are using a driver managing SG_IO (version 3 and after) */
2537 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2538 if (rc < 0) {
2539 error_setg(errp, "cannot get SG_IO version number: %s. "
2540 "Is this a SCSI device?",
2541 strerror(-rc));
2542 return;
2544 if (sg_version < 30000) {
2545 error_setg(errp, "scsi generic interface too old");
2546 return;
2549 /* get device type from INQUIRY data */
2550 rc = get_device_type(s);
2551 if (rc < 0) {
2552 error_setg(errp, "INQUIRY failed");
2553 return;
2556 /* Make a guess for the block size; we'll fix it when the guest sends
2557 * READ CAPACITY. If it doesn't, it would likely assume these sizes
2558 * anyway. (TODO: check in /sys).
2559 */
2560 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2561 s->qdev.blocksize = 2048;
2562 } else {
2563 s->qdev.blocksize = 512;
2566 /* Make the scsi-block device non-removable via the HMP and QMP eject
2567 * commands.
2568 */
2569 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2571 scsi_realize(&s->qdev, errp);
2572 scsi_generic_read_device_identification(&s->qdev);
2575 typedef struct SCSIBlockReq {
2576 SCSIDiskReq req;
2577 sg_io_hdr_t io_header;
2579 /* Selected bytes of the original CDB, copied into our own CDB. */
2580 uint8_t cmd, cdb1, group_number;
2582 /* CDB passed to SG_IO. */
2583 uint8_t cdb[16];
2584 } SCSIBlockReq;
2586 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2587 int64_t offset, QEMUIOVector *iov,
2588 int direction,
2589 BlockCompletionFunc *cb, void *opaque)
2590 {
2591 sg_io_hdr_t *io_header = &req->io_header;
2592 SCSIDiskReq *r = &req->req;
2593 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2594 int nb_logical_blocks;
2595 uint64_t lba;
2596 BlockAIOCB *aiocb;
2598 /* This is not supported yet. It can only happen if the guest does
2599 * reads and writes that are not aligned to the logical sector size
2600 * _and_ cover multiple MemoryRegions.
2601 */
2602 assert(offset % s->qdev.blocksize == 0);
2603 assert(iov->size % s->qdev.blocksize == 0);
2605 io_header->interface_id = 'S';
2607 /* The data transfer comes from the QEMUIOVector. */
2608 io_header->dxfer_direction = direction;
2609 io_header->dxfer_len = iov->size;
2610 io_header->dxferp = (void *)iov->iov;
2611 io_header->iovec_count = iov->niov;
2612 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2614 /* Build a new CDB with the LBA and length patched in, in case
2615 * DMA helpers split the transfer in multiple segments. Do not
2616 * build a CDB smaller than what the guest wanted, and only build
2617 * a larger one if strictly necessary.
2618 */
2619 io_header->cmdp = req->cdb;
2620 lba = offset / s->qdev.blocksize;
2621 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
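/* req->cmd >> 5 is the group code of the original opcode, which implies
 * the size of the CDB the guest sent; the checks below keep the rebuilt
 * CDB from being any shorter than that.
 */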
2623 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2624 /* 6-byte CDB */
2625 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2626 req->cdb[4] = nb_logical_blocks;
2627 req->cdb[5] = 0;
2628 io_header->cmd_len = 6;
2629 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2630 /* 10-byte CDB */
2631 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2632 req->cdb[1] = req->cdb1;
2633 stl_be_p(&req->cdb[2], lba);
2634 req->cdb[6] = req->group_number;
2635 stw_be_p(&req->cdb[7], nb_logical_blocks);
2636 req->cdb[9] = 0;
2637 io_header->cmd_len = 10;
2638 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2639 /* 12-byte CDB */
2640 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2641 req->cdb[1] = req->cdb1;
2642 stl_be_p(&req->cdb[2], lba);
2643 stl_be_p(&req->cdb[6], nb_logical_blocks);
2644 req->cdb[10] = req->group_number;
2645 req->cdb[11] = 0;
2646 io_header->cmd_len = 12;
2647 } else {
2648 /* 16-byte CDB */
2649 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2650 req->cdb[1] = req->cdb1;
2651 stq_be_p(&req->cdb[2], lba);
2652 stl_be_p(&req->cdb[10], nb_logical_blocks);
2653 req->cdb[14] = req->group_number;
2654 req->cdb[15] = 0;
2655 io_header->cmd_len = 16;
2656 }
2658 /* The rest is as in scsi-generic.c. */
2659 io_header->mx_sb_len = sizeof(r->req.sense);
2660 io_header->sbp = r->req.sense;
2661 io_header->timeout = UINT_MAX;
2662 io_header->usr_ptr = r;
2663 io_header->flags |= SG_FLAG_DIRECT_IO;
2665 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2666 assert(aiocb != NULL);
2667 return aiocb;
2668 }
2670 static bool scsi_block_no_fua(SCSICommand *cmd)
2672 return false;
2675 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2676 QEMUIOVector *iov,
2677 BlockCompletionFunc *cb, void *cb_opaque,
2678 void *opaque)
2680 SCSIBlockReq *r = opaque;
2681 return scsi_block_do_sgio(r, offset, iov,
2682 SG_DXFER_FROM_DEV, cb, cb_opaque);
2685 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2686 QEMUIOVector *iov,
2687 BlockCompletionFunc *cb, void *cb_opaque,
2688 void *opaque)
2690 SCSIBlockReq *r = opaque;
2691 return scsi_block_do_sgio(r, offset, iov,
2692 SG_DXFER_TO_DEV, cb, cb_opaque);
2695 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2697 switch (buf[0]) {
2698 case VERIFY_10:
2699 case VERIFY_12:
2700 case VERIFY_16:
2701 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2702 * for the number of logical blocks specified in the length
2703 * field). For other modes, do not use scatter/gather operation.
2705 if ((buf[1] & 6) == 2) {
2706 return false;
2707 }
2708 break;
2710 case READ_6:
2711 case READ_10:
2712 case READ_12:
2713 case READ_16:
2714 case WRITE_6:
2715 case WRITE_10:
2716 case WRITE_12:
2717 case WRITE_16:
2718 case WRITE_VERIFY_10:
2719 case WRITE_VERIFY_12:
2720 case WRITE_VERIFY_16:
2721 /* MMC writing cannot be done via DMA helpers, because it sometimes
2722 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2723 * We might use scsi_block_dma_reqops as long as no writing commands are
2724 * seen, but performance usually isn't paramount on optical media. So,
2725 * just make scsi-block operate the same as scsi-generic for them.
2726 */
2727 if (s->qdev.type != TYPE_ROM) {
2728 return false;
2729 }
2730 break;
2732 default:
2733 break;
2734 }
2736 return true;
2737 }
2740 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2741 {
2742 SCSIBlockReq *r = (SCSIBlockReq *)req;
2743 r->cmd = req->cmd.buf[0];
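/* Record the pieces of the original CDB that scsi_block_do_sgio() must
 * carry over into the CDB it rebuilds: the byte 1 flags and the group
 * number field, whose offset depends on the original CDB length.
 */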
2744 switch (r->cmd >> 5) {
2745 case 0:
2746 /* 6-byte CDB. */
2747 r->cdb1 = r->group_number = 0;
2748 break;
2749 case 1:
2750 /* 10-byte CDB. */
2751 r->cdb1 = req->cmd.buf[1];
2752 r->group_number = req->cmd.buf[6];
2753 break;
2754 case 4:
2755 /* 12-byte CDB. */
2756 r->cdb1 = req->cmd.buf[1];
2757 r->group_number = req->cmd.buf[10];
2758 break;
2759 case 5:
2760 /* 16-byte CDB. */
2761 r->cdb1 = req->cmd.buf[1];
2762 r->group_number = req->cmd.buf[14];
2763 break;
2764 default:
2765 abort();
2766 }
2768 if (r->cdb1 & 0xe0) {
2769 /* Protection information is not supported. */
2770 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2771 return 0;
2772 }
2774 r->req.status = &r->io_header.status;
2775 return scsi_disk_dma_command(req, buf);
2776 }
2778 static const SCSIReqOps scsi_block_dma_reqops = {
2779 .size = sizeof(SCSIBlockReq),
2780 .free_req = scsi_free_request,
2781 .send_command = scsi_block_dma_command,
2782 .read_data = scsi_read_data,
2783 .write_data = scsi_write_data,
2784 .get_buf = scsi_get_buf,
2785 .load_request = scsi_disk_load_request,
2786 .save_request = scsi_disk_save_request,
2789 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2790 uint32_t lun, uint8_t *buf,
2791 void *hba_private)
2793 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2795 if (scsi_block_is_passthrough(s, buf)) {
2796 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2797 hba_private);
2798 } else {
2799 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2800 hba_private);
2804 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2805 uint8_t *buf, void *hba_private)
2807 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2809 if (scsi_block_is_passthrough(s, buf)) {
2810 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2811 } else {
2812 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2816 #endif
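/* Default DMAIOFunc implementations used by scsi-hd and scsi-cd: they map
 * requests directly onto asynchronous block-layer reads and writes.
 * scsi-block overrides them with the SG_IO based helpers above.
 */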
2818 static
2819 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2820 BlockCompletionFunc *cb, void *cb_opaque,
2821 void *opaque)
2822 {
2823 SCSIDiskReq *r = opaque;
2824 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2825 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2826 }
2828 static
2829 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2830 BlockCompletionFunc *cb, void *cb_opaque,
2831 void *opaque)
2832 {
2833 SCSIDiskReq *r = opaque;
2834 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2835 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2836 }
2838 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2840 DeviceClass *dc = DEVICE_CLASS(klass);
2841 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2843 dc->fw_name = "disk";
2844 dc->reset = scsi_disk_reset;
2845 sdc->dma_readv = scsi_dma_readv;
2846 sdc->dma_writev = scsi_dma_writev;
2847 sdc->need_fua_emulation = scsi_is_cmd_fua;
2850 static const TypeInfo scsi_disk_base_info = {
2851 .name = TYPE_SCSI_DISK_BASE,
2852 .parent = TYPE_SCSI_DEVICE,
2853 .class_init = scsi_disk_base_class_initfn,
2854 .instance_size = sizeof(SCSIDiskState),
2855 .class_size = sizeof(SCSIDiskClass),
2856 .abstract = true,
2859 #define DEFINE_SCSI_DISK_PROPERTIES() \
2860 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
2861 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2862 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2863 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2864 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2865 DEFINE_PROP_STRING("product", SCSIDiskState, product)
2867 static Property scsi_hd_properties[] = {
2868 DEFINE_SCSI_DISK_PROPERTIES(),
2869 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2870 SCSI_DISK_F_REMOVABLE, false),
2871 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2872 SCSI_DISK_F_DPOFUA, false),
2873 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2874 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2875 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2876 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2877 DEFAULT_MAX_UNMAP_SIZE),
2878 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2879 DEFAULT_MAX_IO_SIZE),
2880 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
2881 DEFINE_PROP_END_OF_LIST(),
2882 };
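/* Illustrative use (names are examples only): with a backend defined via
 * -drive if=none,id=disk0,file=test.img, the device can be instantiated as
 *   -device scsi-hd,drive=disk0,serial=QEMU0001,ver=2.5
 * on a bus provided by a SCSI controller such as virtio-scsi-pci.
 */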
2884 static const VMStateDescription vmstate_scsi_disk_state = {
2885 .name = "scsi-disk",
2886 .version_id = 1,
2887 .minimum_version_id = 1,
2888 .fields = (VMStateField[]) {
2889 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
2890 VMSTATE_BOOL(media_changed, SCSIDiskState),
2891 VMSTATE_BOOL(media_event, SCSIDiskState),
2892 VMSTATE_BOOL(eject_request, SCSIDiskState),
2893 VMSTATE_BOOL(tray_open, SCSIDiskState),
2894 VMSTATE_BOOL(tray_locked, SCSIDiskState),
2895 VMSTATE_END_OF_LIST()
2899 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
2901 DeviceClass *dc = DEVICE_CLASS(klass);
2902 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2904 sc->realize = scsi_hd_realize;
2905 sc->alloc_req = scsi_new_request;
2906 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2907 dc->desc = "virtual SCSI disk";
2908 dc->props = scsi_hd_properties;
2909 dc->vmsd = &vmstate_scsi_disk_state;
2912 static const TypeInfo scsi_hd_info = {
2913 .name = "scsi-hd",
2914 .parent = TYPE_SCSI_DISK_BASE,
2915 .class_init = scsi_hd_class_initfn,
2918 static Property scsi_cd_properties[] = {
2919 DEFINE_SCSI_DISK_PROPERTIES(),
2920 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2921 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2922 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2923 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2924 DEFAULT_MAX_IO_SIZE),
2925 DEFINE_PROP_END_OF_LIST(),
2928 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
2930 DeviceClass *dc = DEVICE_CLASS(klass);
2931 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2933 sc->realize = scsi_cd_realize;
2934 sc->alloc_req = scsi_new_request;
2935 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2936 dc->desc = "virtual SCSI CD-ROM";
2937 dc->props = scsi_cd_properties;
2938 dc->vmsd = &vmstate_scsi_disk_state;
2941 static const TypeInfo scsi_cd_info = {
2942 .name = "scsi-cd",
2943 .parent = TYPE_SCSI_DISK_BASE,
2944 .class_init = scsi_cd_class_initfn,
2947 #ifdef __linux__
2948 static Property scsi_block_properties[] = {
2949 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
2950 DEFINE_PROP_END_OF_LIST(),
2953 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
2955 DeviceClass *dc = DEVICE_CLASS(klass);
2956 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2957 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2959 sc->realize = scsi_block_realize;
2960 sc->alloc_req = scsi_block_new_request;
2961 sc->parse_cdb = scsi_block_parse_cdb;
2962 sdc->dma_readv = scsi_block_dma_readv;
2963 sdc->dma_writev = scsi_block_dma_writev;
2964 sdc->need_fua_emulation = scsi_block_no_fua;
2965 dc->desc = "SCSI block device passthrough";
2966 dc->props = scsi_block_properties;
2967 dc->vmsd = &vmstate_scsi_disk_state;
2970 static const TypeInfo scsi_block_info = {
2971 .name = "scsi-block",
2972 .parent = TYPE_SCSI_DISK_BASE,
2973 .class_init = scsi_block_class_initfn,
2975 #endif
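/* "scsi-disk" is the legacy catch-all device: scsi_disk_realize() above
 * turns it into either a scsi-hd or a scsi-cd at realize time, depending on
 * whether the legacy drive was created as a CD-ROM.
 */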
2977 static Property scsi_disk_properties[] = {
2978 DEFINE_SCSI_DISK_PROPERTIES(),
2979 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2980 SCSI_DISK_F_REMOVABLE, false),
2981 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2982 SCSI_DISK_F_DPOFUA, false),
2983 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2984 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2985 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2986 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2987 DEFAULT_MAX_UNMAP_SIZE),
2988 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2989 DEFAULT_MAX_IO_SIZE),
2990 DEFINE_PROP_END_OF_LIST(),
2993 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
2995 DeviceClass *dc = DEVICE_CLASS(klass);
2996 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2998 sc->realize = scsi_disk_realize;
2999 sc->alloc_req = scsi_new_request;
3000 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3001 dc->fw_name = "disk";
3002 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
3003 dc->reset = scsi_disk_reset;
3004 dc->props = scsi_disk_properties;
3005 dc->vmsd = &vmstate_scsi_disk_state;
3008 static const TypeInfo scsi_disk_info = {
3009 .name = "scsi-disk",
3010 .parent = TYPE_SCSI_DISK_BASE,
3011 .class_init = scsi_disk_class_initfn,
3014 static void scsi_disk_register_types(void)
3016 type_register_static(&scsi_disk_base_info);
3017 type_register_static(&scsi_hd_info);
3018 type_register_static(&scsi_cd_info);
3019 #ifdef __linux__
3020 type_register_static(&scsi_block_info);
3021 #endif
3022 type_register_static(&scsi_disk_info);
3025 type_init(scsi_disk_register_types)