Fix some compiler warnings related to format strings
[qemu/ar7.git] hw/scsi/scsi-disk.c
1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
22 //#define DEBUG_SCSI
24 #ifdef DEBUG_SCSI
25 #define DPRINTF(fmt, ...) \
26 do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
27 #else
28 #define DPRINTF(fmt, ...) do {} while(0)
29 #endif
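/*
 * Note: the DPRINTF messages above are only compiled in when DEBUG_SCSI is
 * defined, so format-string/argument mismatches can otherwise go unnoticed.
 * Illustrative rule of thumb, matching the usage later in this file:
 * 64-bit LBAs are printed with PRId64 and size_t values with %zd, e.g.
 *
 *     DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
 */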
31 #include "qemu/osdep.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34 #include "hw/scsi/scsi.h"
35 #include "scsi/constants.h"
36 #include "sysemu/sysemu.h"
37 #include "sysemu/block-backend.h"
38 #include "sysemu/blockdev.h"
39 #include "hw/block/block.h"
40 #include "sysemu/dma.h"
41 #include "qemu/cutils.h"
43 #ifdef __linux__
44 #include <scsi/sg.h>
45 #endif
47 #define SCSI_WRITE_SAME_MAX 524288
48 #define SCSI_DMA_BUF_SIZE 131072
49 #define SCSI_MAX_INQUIRY_LEN 256
50 #define SCSI_MAX_MODE_LEN 256
52 #define DEFAULT_DISCARD_GRANULARITY 4096
53 #define DEFAULT_MAX_UNMAP_SIZE (1 << 30) /* 1 GB */
54 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
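/*
 * Illustrative sizing: SCSI_DMA_BUF_SIZE (131072 bytes) corresponds to
 * 131072 / 512 = 256 qemu sectors, so a large READ/WRITE that is not
 * backed by a scatter/gather list is bounced through the buffer in
 * 128 KiB chunks (see scsi_init_iovec and scsi_write_complete_noio below).
 */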
56 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
58 #define SCSI_DISK_BASE(obj) \
59 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
60 #define SCSI_DISK_BASE_CLASS(klass) \
61 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
62 #define SCSI_DISK_BASE_GET_CLASS(obj) \
63 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
65 typedef struct SCSIDiskClass {
66 SCSIDeviceClass parent_class;
67 DMAIOFunc *dma_readv;
68 DMAIOFunc *dma_writev;
69 bool (*need_fua_emulation)(SCSICommand *cmd);
70 } SCSIDiskClass;
72 typedef struct SCSIDiskReq {
73 SCSIRequest req;
74 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
75 uint64_t sector;
76 uint32_t sector_count;
77 uint32_t buflen;
78 bool started;
79 bool need_fua_emulation;
80 struct iovec iov;
81 QEMUIOVector qiov;
82 BlockAcctCookie acct;
83 unsigned char *status;
84 } SCSIDiskReq;
86 #define SCSI_DISK_F_REMOVABLE 0
87 #define SCSI_DISK_F_DPOFUA 1
88 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
90 typedef struct SCSIDiskState
91 {
92 SCSIDevice qdev;
93 uint32_t features;
94 bool media_changed;
95 bool media_event;
96 bool eject_request;
97 uint16_t port_index;
98 uint64_t max_unmap_size;
99 uint64_t max_io_size;
100 QEMUBH *bh;
101 char *version;
102 char *serial;
103 char *vendor;
104 char *product;
105 bool tray_open;
106 bool tray_locked;
107 } SCSIDiskState;
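/*
 * Unit convention (sketch): SCSIDiskReq.sector and sector_count are always
 * in 512-byte qemu blocks, while the guest-visible logical block size is
 * s->qdev.blocksize. Commands therefore scale CDB values by
 * (s->qdev.blocksize / 512); e.g. with 2048-byte blocks (CD-ROM), LBA 100
 * becomes sector 400.
 */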
109 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
111 static void scsi_free_request(SCSIRequest *req)
113 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
115 qemu_vfree(r->iov.iov_base);
118 /* Helper function for command completion with sense. */
119 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
121 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
122 r->req.tag, sense.key, sense.asc, sense.ascq);
123 scsi_req_build_sense(&r->req, sense);
124 scsi_req_complete(&r->req, CHECK_CONDITION);
127 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
129 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
131 if (!r->iov.iov_base) {
132 r->buflen = size;
133 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
135 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
136 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
139 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
143 qemu_put_be64s(f, &r->sector);
144 qemu_put_be32s(f, &r->sector_count);
145 qemu_put_be32s(f, &r->buflen);
146 if (r->buflen) {
147 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
148 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
149 } else if (!req->retry) {
150 uint32_t len = r->iov.iov_len;
151 qemu_put_be32s(f, &len);
152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
157 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
159 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
161 qemu_get_be64s(f, &r->sector);
162 qemu_get_be32s(f, &r->sector_count);
163 qemu_get_be32s(f, &r->buflen);
164 if (r->buflen) {
165 scsi_init_iovec(r, r->buflen);
166 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
167 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
168 } else if (!r->req.retry) {
169 uint32_t len;
170 qemu_get_be32s(f, &len);
171 r->iov.iov_len = len;
172 assert(r->iov.iov_len <= r->buflen);
173 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
177 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
180 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
182 if (r->req.io_canceled) {
183 scsi_req_cancel_complete(&r->req);
184 return true;
187 if (ret < 0 || (r->status && *r->status)) {
188 return scsi_handle_rw_error(r, -ret, acct_failed);
191 return false;
194 static void scsi_aio_complete(void *opaque, int ret)
196 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
197 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
199 assert(r->req.aiocb != NULL);
200 r->req.aiocb = NULL;
201 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
202 if (scsi_disk_req_check_error(r, ret, true)) {
203 goto done;
206 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
207 scsi_req_complete(&r->req, GOOD);
209 done:
210 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
211 scsi_req_unref(&r->req);
214 static bool scsi_is_cmd_fua(SCSICommand *cmd)
216 switch (cmd->buf[0]) {
217 case READ_10:
218 case READ_12:
219 case READ_16:
220 case WRITE_10:
221 case WRITE_12:
222 case WRITE_16:
223 return (cmd->buf[1] & 8) != 0;
225 case VERIFY_10:
226 case VERIFY_12:
227 case VERIFY_16:
228 case WRITE_VERIFY_10:
229 case WRITE_VERIFY_12:
230 case WRITE_VERIFY_16:
231 return true;
233 case READ_6:
234 case WRITE_6:
235 default:
236 return false;
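/*
 * Illustrative CDB layout for the check above: in READ/WRITE (10/12/16) the
 * FUA bit is bit 3 of byte 1, so e.g. a WRITE(10) CDB beginning with
 * 0x2a 0x08 requests forced unit access, while READ(6)/WRITE(6) have no
 * FUA bit at all and always return false here.
 */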
240 static void scsi_write_do_fua(SCSIDiskReq *r)
242 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
244 assert(r->req.aiocb == NULL);
245 assert(!r->req.io_canceled);
247 if (r->need_fua_emulation) {
248 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
249 BLOCK_ACCT_FLUSH);
250 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
251 return;
254 scsi_req_complete(&r->req, GOOD);
255 scsi_req_unref(&r->req);
258 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
260 assert(r->req.aiocb == NULL);
261 if (scsi_disk_req_check_error(r, ret, false)) {
262 goto done;
265 r->sector += r->sector_count;
266 r->sector_count = 0;
267 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
268 scsi_write_do_fua(r);
269 return;
270 } else {
271 scsi_req_complete(&r->req, GOOD);
274 done:
275 scsi_req_unref(&r->req);
278 static void scsi_dma_complete(void *opaque, int ret)
280 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
281 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
283 assert(r->req.aiocb != NULL);
284 r->req.aiocb = NULL;
286 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
287 if (ret < 0) {
288 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
289 } else {
290 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
292 scsi_dma_complete_noio(r, ret);
293 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
296 static void scsi_read_complete(void * opaque, int ret)
298 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
299 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
300 int n;
302 assert(r->req.aiocb != NULL);
303 r->req.aiocb = NULL;
304 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
305 if (scsi_disk_req_check_error(r, ret, true)) {
306 goto done;
309 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
310 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);
312 n = r->qiov.size / 512;
313 r->sector += n;
314 r->sector_count -= n;
315 scsi_req_data(&r->req, r->qiov.size);
317 done:
318 scsi_req_unref(&r->req);
319 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
322 /* Actually issue a read to the block device. */
323 static void scsi_do_read(SCSIDiskReq *r, int ret)
325 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
326 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
328 assert (r->req.aiocb == NULL);
329 if (scsi_disk_req_check_error(r, ret, false)) {
330 goto done;
333 /* The request is used as the AIO opaque value, so add a ref. */
334 scsi_req_ref(&r->req);
336 if (r->req.sg) {
337 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
338 r->req.resid -= r->req.sg->size;
339 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
340 r->req.sg, r->sector << BDRV_SECTOR_BITS,
341 BDRV_SECTOR_SIZE,
342 sdc->dma_readv, r, scsi_dma_complete, r,
343 DMA_DIRECTION_FROM_DEVICE);
344 } else {
345 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
346 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
347 r->qiov.size, BLOCK_ACCT_READ);
348 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
349 scsi_read_complete, r, r);
352 done:
353 scsi_req_unref(&r->req);
356 static void scsi_do_read_cb(void *opaque, int ret)
358 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
359 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
361 assert (r->req.aiocb != NULL);
362 r->req.aiocb = NULL;
364 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
365 if (ret < 0) {
366 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
367 } else {
368 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
370 scsi_do_read(opaque, ret);
371 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
374 /* Read more data from scsi device into buffer. */
375 static void scsi_read_data(SCSIRequest *req)
377 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
378 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
379 bool first;
381 DPRINTF("Read sector_count=%d\n", r->sector_count);
382 if (r->sector_count == 0) {
383 /* This also clears the sense buffer for REQUEST SENSE. */
384 scsi_req_complete(&r->req, GOOD);
385 return;
388 /* No data transfer may already be in progress */
389 assert(r->req.aiocb == NULL);
391 /* The request is used as the AIO opaque value, so add a ref. */
392 scsi_req_ref(&r->req);
393 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
394 DPRINTF("Data transfer direction invalid\n");
395 scsi_read_complete(r, -EINVAL);
396 return;
399 if (!blk_is_available(req->dev->conf.blk)) {
400 scsi_read_complete(r, -ENOMEDIUM);
401 return;
404 first = !r->started;
405 r->started = true;
406 if (first && r->need_fua_emulation) {
407 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
408 BLOCK_ACCT_FLUSH);
409 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
410 } else {
411 scsi_do_read(r, 0);
412 }
413 }
415 /*
416 * scsi_handle_rw_error has two return values. False means that the error
417 * must be ignored, true means that the error has been processed and the
418 * caller should not do anything else for this request. Note that
419 * scsi_handle_rw_error always manages its reference counts, independent
420 * of the return value.
421 */
422 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
424 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
425 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
426 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
427 is_read, error);
429 if (action == BLOCK_ERROR_ACTION_REPORT) {
430 if (acct_failed) {
431 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
433 switch (error) {
434 case 0:
435 /* The command has run, no need to fake sense. */
436 assert(r->status && *r->status);
437 scsi_req_complete(&r->req, *r->status);
438 break;
439 case ENOMEDIUM:
440 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
441 break;
442 case ENOMEM:
443 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
444 break;
445 case EINVAL:
446 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
447 break;
448 case ENOSPC:
449 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
450 break;
451 default:
452 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
453 break;
456 if (!error) {
457 assert(r->status && *r->status);
458 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
460 if (error == ECANCELED || error == EAGAIN || error == ENOTCONN ||
461 error == 0) {
462 /* These errors are handled by guest. */
463 scsi_req_complete(&r->req, *r->status);
464 return true;
468 blk_error_action(s->qdev.conf.blk, action, is_read, error);
469 if (action == BLOCK_ERROR_ACTION_STOP) {
470 scsi_req_retry(&r->req);
472 return action != BLOCK_ERROR_ACTION_IGNORE;
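/*
 * Rough summary of the policy consulted above: the BlockErrorAction comes
 * from the drive's rerror=/werror= settings (report, ignore, stop, enospc);
 * e.g. with werror=stop a failed write leaves the request queued for retry
 * once the VM is resumed. See blk_get_error_action() for the exact mapping.
 */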
475 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
477 uint32_t n;
479 assert (r->req.aiocb == NULL);
480 if (scsi_disk_req_check_error(r, ret, false)) {
481 goto done;
484 n = r->qiov.size / 512;
485 r->sector += n;
486 r->sector_count -= n;
487 if (r->sector_count == 0) {
488 scsi_write_do_fua(r);
489 return;
490 } else {
491 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
492 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
493 scsi_req_data(&r->req, r->qiov.size);
496 done:
497 scsi_req_unref(&r->req);
500 static void scsi_write_complete(void * opaque, int ret)
502 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
503 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
505 assert (r->req.aiocb != NULL);
506 r->req.aiocb = NULL;
508 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
509 if (ret < 0) {
510 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
511 } else {
512 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
514 scsi_write_complete_noio(r, ret);
515 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
518 static void scsi_write_data(SCSIRequest *req)
520 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
521 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
522 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
524 /* No data transfer may already be in progress */
525 assert(r->req.aiocb == NULL);
527 /* The request is used as the AIO opaque value, so add a ref. */
528 scsi_req_ref(&r->req);
529 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
530 DPRINTF("Data transfer direction invalid\n");
531 scsi_write_complete_noio(r, -EINVAL);
532 return;
535 if (!r->req.sg && !r->qiov.size) {
536 /* Called for the first time. Ask the driver to send us more data. */
537 r->started = true;
538 scsi_write_complete_noio(r, 0);
539 return;
541 if (!blk_is_available(req->dev->conf.blk)) {
542 scsi_write_complete_noio(r, -ENOMEDIUM);
543 return;
546 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
547 r->req.cmd.buf[0] == VERIFY_16) {
548 if (r->req.sg) {
549 scsi_dma_complete_noio(r, 0);
550 } else {
551 scsi_write_complete_noio(r, 0);
553 return;
556 if (r->req.sg) {
557 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
558 r->req.resid -= r->req.sg->size;
559 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
560 r->req.sg, r->sector << BDRV_SECTOR_BITS,
561 BDRV_SECTOR_SIZE,
562 sdc->dma_writev, r, scsi_dma_complete, r,
563 DMA_DIRECTION_TO_DEVICE);
564 } else {
565 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
566 r->qiov.size, BLOCK_ACCT_WRITE);
567 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
568 scsi_write_complete, r, r);
572 /* Return a pointer to the data buffer. */
573 static uint8_t *scsi_get_buf(SCSIRequest *req)
575 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
577 return (uint8_t *)r->iov.iov_base;
580 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
582 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
583 int buflen = 0;
584 int start;
586 if (req->cmd.buf[1] & 0x1) {
587 /* Vital product data */
588 uint8_t page_code = req->cmd.buf[2];
590 outbuf[buflen++] = s->qdev.type & 0x1f;
591 outbuf[buflen++] = page_code ; // this page
592 outbuf[buflen++] = 0x00;
593 outbuf[buflen++] = 0x00;
594 start = buflen;
596 switch (page_code) {
597 case 0x00: /* Supported page codes, mandatory */
599 DPRINTF("Inquiry EVPD[Supported pages] "
600 "buffer size %zd\n", req->cmd.xfer);
601 outbuf[buflen++] = 0x00; // list of supported pages (this page)
602 if (s->serial) {
603 outbuf[buflen++] = 0x80; // unit serial number
605 outbuf[buflen++] = 0x83; // device identification
606 if (s->qdev.type == TYPE_DISK) {
607 outbuf[buflen++] = 0xb0; // block limits
608 outbuf[buflen++] = 0xb2; // thin provisioning
610 break;
612 case 0x80: /* Device serial number, optional */
614 int l;
616 if (!s->serial) {
617 DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
618 return -1;
621 l = strlen(s->serial);
622 if (l > 36) {
623 l = 36;
626 DPRINTF("Inquiry EVPD[Serial number] "
627 "buffer size %zd\n", req->cmd.xfer);
628 memcpy(outbuf+buflen, s->serial, l);
629 buflen += l;
630 break;
633 case 0x83: /* Device identification page, mandatory */
635 const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
636 int max_len = s->serial ? 20 : 255 - 8;
637 int id_len = strlen(str);
639 if (id_len > max_len) {
640 id_len = max_len;
642 DPRINTF("Inquiry EVPD[Device identification] "
643 "buffer size %zd\n", req->cmd.xfer);
645 outbuf[buflen++] = 0x2; // ASCII
646 outbuf[buflen++] = 0; // not officially assigned
647 outbuf[buflen++] = 0; // reserved
648 outbuf[buflen++] = id_len; // length of data following
649 memcpy(outbuf+buflen, str, id_len);
650 buflen += id_len;
652 if (s->qdev.wwn) {
653 outbuf[buflen++] = 0x1; // Binary
654 outbuf[buflen++] = 0x3; // NAA
655 outbuf[buflen++] = 0; // reserved
656 outbuf[buflen++] = 8;
657 stq_be_p(&outbuf[buflen], s->qdev.wwn);
658 buflen += 8;
661 if (s->qdev.port_wwn) {
662 outbuf[buflen++] = 0x61; // SAS / Binary
663 outbuf[buflen++] = 0x93; // PIV / Target port / NAA
664 outbuf[buflen++] = 0; // reserved
665 outbuf[buflen++] = 8;
666 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
667 buflen += 8;
670 if (s->port_index) {
671 outbuf[buflen++] = 0x61; // SAS / Binary
672 outbuf[buflen++] = 0x94; // PIV / Target port / relative target port
673 outbuf[buflen++] = 0; // reserved
674 outbuf[buflen++] = 4;
675 stw_be_p(&outbuf[buflen + 2], s->port_index);
676 buflen += 4;
678 break;
680 case 0xb0: /* block limits */
682 unsigned int unmap_sectors =
683 s->qdev.conf.discard_granularity / s->qdev.blocksize;
684 unsigned int min_io_size =
685 s->qdev.conf.min_io_size / s->qdev.blocksize;
686 unsigned int opt_io_size =
687 s->qdev.conf.opt_io_size / s->qdev.blocksize;
688 unsigned int max_unmap_sectors =
689 s->max_unmap_size / s->qdev.blocksize;
690 unsigned int max_io_sectors =
691 s->max_io_size / s->qdev.blocksize;
693 if (s->qdev.type == TYPE_ROM) {
694 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
695 page_code);
696 return -1;
698 /* required VPD size with unmap support */
699 buflen = 0x40;
700 memset(outbuf + 4, 0, buflen - 4);
702 outbuf[4] = 0x1; /* wsnz */
704 /* optimal transfer length granularity */
705 outbuf[6] = (min_io_size >> 8) & 0xff;
706 outbuf[7] = min_io_size & 0xff;
708 /* maximum transfer length */
709 outbuf[8] = (max_io_sectors >> 24) & 0xff;
710 outbuf[9] = (max_io_sectors >> 16) & 0xff;
711 outbuf[10] = (max_io_sectors >> 8) & 0xff;
712 outbuf[11] = max_io_sectors & 0xff;
714 /* optimal transfer length */
715 outbuf[12] = (opt_io_size >> 24) & 0xff;
716 outbuf[13] = (opt_io_size >> 16) & 0xff;
717 outbuf[14] = (opt_io_size >> 8) & 0xff;
718 outbuf[15] = opt_io_size & 0xff;
720 /* max unmap LBA count, default is 1GB */
721 outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
722 outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
723 outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
724 outbuf[23] = max_unmap_sectors & 0xff;
726 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header. */
727 outbuf[24] = 0;
728 outbuf[25] = 0;
729 outbuf[26] = 0;
730 outbuf[27] = 255;
732 /* optimal unmap granularity */
733 outbuf[28] = (unmap_sectors >> 24) & 0xff;
734 outbuf[29] = (unmap_sectors >> 16) & 0xff;
735 outbuf[30] = (unmap_sectors >> 8) & 0xff;
736 outbuf[31] = unmap_sectors & 0xff;
738 /* max write same size */
739 outbuf[36] = 0;
740 outbuf[37] = 0;
741 outbuf[38] = 0;
742 outbuf[39] = 0;
744 outbuf[40] = (max_io_sectors >> 24) & 0xff;
745 outbuf[41] = (max_io_sectors >> 16) & 0xff;
746 outbuf[42] = (max_io_sectors >> 8) & 0xff;
747 outbuf[43] = max_io_sectors & 0xff;
748 break;
750 case 0xb2: /* thin provisioning */
752 buflen = 8;
753 outbuf[4] = 0;
754 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
755 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
756 outbuf[7] = 0;
757 break;
759 default:
760 return -1;
762 /* done with EVPD */
763 assert(buflen - start <= 255);
764 outbuf[start - 1] = buflen - start;
765 return buflen;
768 /* Standard INQUIRY data */
769 if (req->cmd.buf[2] != 0) {
770 return -1;
773 /* PAGE CODE == 0 */
774 buflen = req->cmd.xfer;
775 if (buflen > SCSI_MAX_INQUIRY_LEN) {
776 buflen = SCSI_MAX_INQUIRY_LEN;
779 outbuf[0] = s->qdev.type & 0x1f;
780 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
782 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
783 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
785 memset(&outbuf[32], 0, 4);
786 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
787 /*
788 * We claim conformance to SPC-3, which is required for guests
789 * to ask for modern features like READ CAPACITY(16) or the
790 * block characteristics VPD page by default. Not all of SPC-3
791 * is actually implemented, but we're good enough.
792 */
793 outbuf[2] = 5;
794 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
796 if (buflen > 36) {
797 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
798 } else {
799 /* If the allocation length of CDB is too small,
800 the additional length is not adjusted */
801 outbuf[4] = 36 - 5;
804 /* Sync data transfer and TCQ. */
805 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
806 return buflen;
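/*
 * Rough layout of the standard INQUIRY data built above: byte 0 is the
 * peripheral device type, bit 7 of byte 1 flags removable media, byte 2 = 5
 * claims SPC-3, bytes 8-15 and 16-31 carry the space-padded vendor and
 * product strings, and bytes 32-35 the version string.
 */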
809 static inline bool media_is_dvd(SCSIDiskState *s)
811 uint64_t nb_sectors;
812 if (s->qdev.type != TYPE_ROM) {
813 return false;
815 if (!blk_is_available(s->qdev.conf.blk)) {
816 return false;
818 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
819 return nb_sectors > CD_MAX_SECTORS;
822 static inline bool media_is_cd(SCSIDiskState *s)
824 uint64_t nb_sectors;
825 if (s->qdev.type != TYPE_ROM) {
826 return false;
828 if (!blk_is_available(s->qdev.conf.blk)) {
829 return false;
831 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
832 return nb_sectors <= CD_MAX_SECTORS;
835 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
836 uint8_t *outbuf)
838 uint8_t type = r->req.cmd.buf[1] & 7;
840 if (s->qdev.type != TYPE_ROM) {
841 return -1;
844 /* Types 1/2 are only defined for Blu-Ray. */
845 if (type != 0) {
846 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
847 return -1;
850 memset(outbuf, 0, 34);
851 outbuf[1] = 32;
852 outbuf[2] = 0xe; /* last session complete, disc finalized */
853 outbuf[3] = 1; /* first track on disc */
854 outbuf[4] = 1; /* # of sessions */
855 outbuf[5] = 1; /* first track of last session */
856 outbuf[6] = 1; /* last track of last session */
857 outbuf[7] = 0x20; /* unrestricted use */
858 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
859 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
860 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
861 /* 24-31: disc bar code */
862 /* 32: disc application code */
863 /* 33: number of OPC tables */
865 return 34;
868 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
869 uint8_t *outbuf)
871 static const int rds_caps_size[5] = {
872 [0] = 2048 + 4,
873 [1] = 4 + 4,
874 [3] = 188 + 4,
875 [4] = 2048 + 4,
878 uint8_t media = r->req.cmd.buf[1];
879 uint8_t layer = r->req.cmd.buf[6];
880 uint8_t format = r->req.cmd.buf[7];
881 int size = -1;
883 if (s->qdev.type != TYPE_ROM) {
884 return -1;
886 if (media != 0) {
887 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
888 return -1;
891 if (format != 0xff) {
892 if (!blk_is_available(s->qdev.conf.blk)) {
893 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
894 return -1;
896 if (media_is_cd(s)) {
897 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
898 return -1;
900 if (format >= ARRAY_SIZE(rds_caps_size)) {
901 return -1;
903 size = rds_caps_size[format];
904 memset(outbuf, 0, size);
907 switch (format) {
908 case 0x00: {
909 /* Physical format information */
910 uint64_t nb_sectors;
911 if (layer != 0) {
912 goto fail;
914 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
916 outbuf[4] = 1; /* DVD-ROM, part version 1 */
917 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
918 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
919 outbuf[7] = 0; /* default densities */
921 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
922 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
923 break;
926 case 0x01: /* DVD copyright information, all zeros */
927 break;
929 case 0x03: /* BCA information - invalid field for no BCA info */
930 return -1;
932 case 0x04: /* DVD disc manufacturing information, all zeros */
933 break;
935 case 0xff: { /* List capabilities */
936 int i;
937 size = 4;
938 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
939 if (!rds_caps_size[i]) {
940 continue;
942 outbuf[size] = i;
943 outbuf[size + 1] = 0x40; /* Not writable, readable */
944 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
945 size += 4;
947 break;
950 default:
951 return -1;
954 /* Size of buffer, not including 2 byte size field */
955 stw_be_p(outbuf, size - 2);
956 return size;
958 fail:
959 return -1;
962 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
964 uint8_t event_code, media_status;
966 media_status = 0;
967 if (s->tray_open) {
968 media_status = MS_TRAY_OPEN;
969 } else if (blk_is_inserted(s->qdev.conf.blk)) {
970 media_status = MS_MEDIA_PRESENT;
973 /* Event notification descriptor */
974 event_code = MEC_NO_CHANGE;
975 if (media_status != MS_TRAY_OPEN) {
976 if (s->media_event) {
977 event_code = MEC_NEW_MEDIA;
978 s->media_event = false;
979 } else if (s->eject_request) {
980 event_code = MEC_EJECT_REQUESTED;
981 s->eject_request = false;
985 outbuf[0] = event_code;
986 outbuf[1] = media_status;
988 /* These fields are reserved, just clear them. */
989 outbuf[2] = 0;
990 outbuf[3] = 0;
991 return 4;
994 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
995 uint8_t *outbuf)
997 int size;
998 uint8_t *buf = r->req.cmd.buf;
999 uint8_t notification_class_request = buf[4];
1000 if (s->qdev.type != TYPE_ROM) {
1001 return -1;
1003 if ((buf[1] & 1) == 0) {
1004 /* asynchronous */
1005 return -1;
1008 size = 4;
1009 outbuf[0] = outbuf[1] = 0;
1010 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
1011 if (notification_class_request & (1 << GESN_MEDIA)) {
1012 outbuf[2] = GESN_MEDIA;
1013 size += scsi_event_status_media(s, &outbuf[size]);
1014 } else {
1015 outbuf[2] = 0x80;
1017 stw_be_p(outbuf, size - 4);
1018 return size;
1021 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1023 int current;
1025 if (s->qdev.type != TYPE_ROM) {
1026 return -1;
1029 if (media_is_dvd(s)) {
1030 current = MMC_PROFILE_DVD_ROM;
1031 } else if (media_is_cd(s)) {
1032 current = MMC_PROFILE_CD_ROM;
1033 } else {
1034 current = MMC_PROFILE_NONE;
1037 memset(outbuf, 0, 40);
1038 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1039 stw_be_p(&outbuf[6], current);
1040 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1041 outbuf[10] = 0x03; /* persistent, current */
1042 outbuf[11] = 8; /* two profiles */
1043 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1044 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1045 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1046 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1047 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1048 stw_be_p(&outbuf[20], 1);
1049 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1050 outbuf[23] = 8;
1051 stl_be_p(&outbuf[24], 1); /* SCSI */
1052 outbuf[28] = 1; /* DBE = 1, mandatory */
1053 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1054 stw_be_p(&outbuf[32], 3);
1055 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1056 outbuf[35] = 4;
1057 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1058 /* TODO: Random readable, CD read, DVD read, drive serial number,
1059 power management */
1060 return 40;
1063 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1065 if (s->qdev.type != TYPE_ROM) {
1066 return -1;
1068 memset(outbuf, 0, 8);
1069 outbuf[5] = 1; /* CD-ROM */
1070 return 8;
1073 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1074 int page_control)
1076 static const int mode_sense_valid[0x3f] = {
1077 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1078 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1079 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1080 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1081 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1082 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1085 uint8_t *p = *p_outbuf + 2;
1086 int length;
1088 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1089 return -1;
1090 }
1092 /*
1093 * If Changeable Values are requested, a mask denoting those mode parameters
1094 * that are changeable shall be returned. As we currently don't support
1095 * parameter changes via MODE_SELECT all bits are returned set to zero.
1096 * The buffer was already memset to zero by the caller of this function.
1097 *
1098 * The offsets here are off by two compared to the descriptions in the
1099 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1100 * but it is done so that offsets are consistent within our implementation
1101 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1102 * 2-byte and 4-byte headers.
1103 */
1104 switch (page) {
1105 case MODE_PAGE_HD_GEOMETRY:
1106 length = 0x16;
1107 if (page_control == 1) { /* Changeable Values */
1108 break;
1110 /* if a geometry hint is available, use it */
1111 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1112 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1113 p[2] = s->qdev.conf.cyls & 0xff;
1114 p[3] = s->qdev.conf.heads & 0xff;
1115 /* Write precomp start cylinder, disabled */
1116 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1117 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1118 p[6] = s->qdev.conf.cyls & 0xff;
1119 /* Reduced current start cylinder, disabled */
1120 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1121 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1122 p[9] = s->qdev.conf.cyls & 0xff;
1123 /* Device step rate [ns], 200ns */
1124 p[10] = 0;
1125 p[11] = 200;
1126 /* Landing zone cylinder */
1127 p[12] = 0xff;
1128 p[13] = 0xff;
1129 p[14] = 0xff;
1130 /* Medium rotation rate [rpm], 5400 rpm */
1131 p[18] = (5400 >> 8) & 0xff;
1132 p[19] = 5400 & 0xff;
1133 break;
1135 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1136 length = 0x1e;
1137 if (page_control == 1) { /* Changeable Values */
1138 break;
1140 /* Transfer rate [kbit/s], 5Mbit/s */
1141 p[0] = 5000 >> 8;
1142 p[1] = 5000 & 0xff;
1143 /* if a geometry hint is available, use it */
1144 p[2] = s->qdev.conf.heads & 0xff;
1145 p[3] = s->qdev.conf.secs & 0xff;
1146 p[4] = s->qdev.blocksize >> 8;
1147 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1148 p[7] = s->qdev.conf.cyls & 0xff;
1149 /* Write precomp start cylinder, disabled */
1150 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1151 p[9] = s->qdev.conf.cyls & 0xff;
1152 /* Reduced current start cylinder, disabled */
1153 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1154 p[11] = s->qdev.conf.cyls & 0xff;
1155 /* Device step rate [100us], 100us */
1156 p[12] = 0;
1157 p[13] = 1;
1158 /* Device step pulse width [us], 1us */
1159 p[14] = 1;
1160 /* Device head settle delay [100us], 100us */
1161 p[15] = 0;
1162 p[16] = 1;
1163 /* Motor on delay [0.1s], 0.1s */
1164 p[17] = 1;
1165 /* Motor off delay [0.1s], 0.1s */
1166 p[18] = 1;
1167 /* Medium rotation rate [rpm], 5400 rpm */
1168 p[26] = (5400 >> 8) & 0xff;
1169 p[27] = 5400 & 0xff;
1170 break;
1172 case MODE_PAGE_CACHING:
1173 length = 0x12;
1174 if (page_control == 1 || /* Changeable Values */
1175 blk_enable_write_cache(s->qdev.conf.blk)) {
1176 p[0] = 4; /* WCE */
1178 break;
1180 case MODE_PAGE_R_W_ERROR:
1181 length = 10;
1182 if (page_control == 1) { /* Changeable Values */
1183 break;
1185 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1186 if (s->qdev.type == TYPE_ROM) {
1187 p[1] = 0x20; /* Read Retry Count */
1189 break;
1191 case MODE_PAGE_AUDIO_CTL:
1192 length = 14;
1193 break;
1195 case MODE_PAGE_CAPABILITIES:
1196 length = 0x14;
1197 if (page_control == 1) { /* Changeable Values */
1198 break;
1201 p[0] = 0x3b; /* CD-R & CD-RW read */
1202 p[1] = 0; /* Writing not supported */
1203 p[2] = 0x7f; /* Audio, composite, digital out,
1204 mode 2 form 1&2, multi session */
1205 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1206 RW corrected, C2 errors, ISRC,
1207 UPC, Bar code */
1208 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1209 /* Locking supported, jumper present, eject, tray */
1210 p[5] = 0; /* no volume & mute control, no
1211 changer */
1212 p[6] = (50 * 176) >> 8; /* 50x read speed */
1213 p[7] = (50 * 176) & 0xff;
1214 p[8] = 2 >> 8; /* Two volume levels */
1215 p[9] = 2 & 0xff;
1216 p[10] = 2048 >> 8; /* 2M buffer */
1217 p[11] = 2048 & 0xff;
1218 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1219 p[13] = (16 * 176) & 0xff;
1220 p[16] = (16 * 176) >> 8; /* 16x write speed */
1221 p[17] = (16 * 176) & 0xff;
1222 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1223 p[19] = (16 * 176) & 0xff;
1224 break;
1226 default:
1227 return -1;
1230 assert(length < 256);
1231 (*p_outbuf)[0] = page;
1232 (*p_outbuf)[1] = length;
1233 *p_outbuf += length + 2;
1234 return length + 2;
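/*
 * Each page produced above is returned in the format
 *   byte 0: page code, byte 1: length of the parameters that follow,
 *   bytes 2..: parameters
 * so the function advances *p_outbuf by length + 2. For example, the
 * caching page comes back as 08 12 04 ... when the write cache is enabled.
 */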
1237 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1239 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1240 uint64_t nb_sectors;
1241 bool dbd;
1242 int page, buflen, ret, page_control;
1243 uint8_t *p;
1244 uint8_t dev_specific_param;
1246 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1247 page = r->req.cmd.buf[2] & 0x3f;
1248 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1249 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
1250 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
1251 memset(outbuf, 0, r->req.cmd.xfer);
1252 p = outbuf;
1254 if (s->qdev.type == TYPE_DISK) {
1255 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1256 if (blk_is_read_only(s->qdev.conf.blk)) {
1257 dev_specific_param |= 0x80; /* Readonly. */
1259 } else {
1260 /* MMC prescribes that CD/DVD drives have no block descriptors,
1261 * and defines no device-specific parameter. */
1262 dev_specific_param = 0x00;
1263 dbd = true;
1266 if (r->req.cmd.buf[0] == MODE_SENSE) {
1267 p[1] = 0; /* Default media type. */
1268 p[2] = dev_specific_param;
1269 p[3] = 0; /* Block descriptor length. */
1270 p += 4;
1271 } else { /* MODE_SENSE_10 */
1272 p[2] = 0; /* Default media type. */
1273 p[3] = dev_specific_param;
1274 p[6] = p[7] = 0; /* Block descriptor length. */
1275 p += 8;
1278 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1279 if (!dbd && nb_sectors) {
1280 if (r->req.cmd.buf[0] == MODE_SENSE) {
1281 outbuf[3] = 8; /* Block descriptor length */
1282 } else { /* MODE_SENSE_10 */
1283 outbuf[7] = 8; /* Block descriptor length */
1285 nb_sectors /= (s->qdev.blocksize / 512);
1286 if (nb_sectors > 0xffffff) {
1287 nb_sectors = 0;
1289 p[0] = 0; /* media density code */
1290 p[1] = (nb_sectors >> 16) & 0xff;
1291 p[2] = (nb_sectors >> 8) & 0xff;
1292 p[3] = nb_sectors & 0xff;
1293 p[4] = 0; /* reserved */
1294 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1295 p[6] = s->qdev.blocksize >> 8;
1296 p[7] = 0;
1297 p += 8;
1300 if (page_control == 3) {
1301 /* Saved Values */
1302 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1303 return -1;
1306 if (page == 0x3f) {
1307 for (page = 0; page <= 0x3e; page++) {
1308 mode_sense_page(s, page, &p, page_control);
1310 } else {
1311 ret = mode_sense_page(s, page, &p, page_control);
1312 if (ret == -1) {
1313 return -1;
1317 buflen = p - outbuf;
1318 /*
1319 * The mode data length field specifies the length in bytes of the
1320 * following data that is available to be transferred. The mode data
1321 * length does not include itself.
1322 */
1323 if (r->req.cmd.buf[0] == MODE_SENSE) {
1324 outbuf[0] = buflen - 1;
1325 } else { /* MODE_SENSE_10 */
1326 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1327 outbuf[1] = (buflen - 2) & 0xff;
1329 return buflen;
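/*
 * Worked example of the mode data length set above: a MODE SENSE(6) reply
 * with the 4-byte header, an 8-byte block descriptor and a single 20-byte
 * caching page is 32 bytes long, so outbuf[0] (which excludes itself) is 31.
 */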
1332 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1334 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1335 int start_track, format, msf, toclen;
1336 uint64_t nb_sectors;
1338 msf = req->cmd.buf[1] & 2;
1339 format = req->cmd.buf[2] & 0xf;
1340 start_track = req->cmd.buf[6];
1341 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1342 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
1343 nb_sectors /= s->qdev.blocksize / 512;
1344 switch (format) {
1345 case 0:
1346 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1347 break;
1348 case 1:
1349 /* multi session : only a single session defined */
1350 toclen = 12;
1351 memset(outbuf, 0, 12);
1352 outbuf[1] = 0x0a;
1353 outbuf[2] = 0x01;
1354 outbuf[3] = 0x01;
1355 break;
1356 case 2:
1357 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1358 break;
1359 default:
1360 return -1;
1362 return toclen;
1365 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1367 SCSIRequest *req = &r->req;
1368 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1369 bool start = req->cmd.buf[4] & 1;
1370 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1371 int pwrcnd = req->cmd.buf[4] & 0xf0;
1373 if (pwrcnd) {
1374 /* eject/load only happens for power condition == 0 */
1375 return 0;
1378 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1379 if (!start && !s->tray_open && s->tray_locked) {
1380 scsi_check_condition(r,
1381 blk_is_inserted(s->qdev.conf.blk)
1382 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1383 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1384 return -1;
1387 if (s->tray_open != !start) {
1388 blk_eject(s->qdev.conf.blk, !start);
1389 s->tray_open = !start;
1392 return 0;
1395 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1397 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1398 int buflen = r->iov.iov_len;
1400 if (buflen) {
1401 DPRINTF("Read buf_len=%d\n", buflen);
1402 r->iov.iov_len = 0;
1403 r->started = true;
1404 scsi_req_data(&r->req, buflen);
1405 return;
1408 /* This also clears the sense buffer for REQUEST SENSE. */
1409 scsi_req_complete(&r->req, GOOD);
1412 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1413 uint8_t *inbuf, int inlen)
1415 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1416 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1417 uint8_t *p;
1418 int len, expected_len, changeable_len, i;
1420 /* The input buffer does not include the page header, so it is
1421 * off by 2 bytes.
1422 */
1423 expected_len = inlen + 2;
1424 if (expected_len > SCSI_MAX_MODE_LEN) {
1425 return -1;
1428 p = mode_current;
1429 memset(mode_current, 0, inlen + 2);
1430 len = mode_sense_page(s, page, &p, 0);
1431 if (len < 0 || len != expected_len) {
1432 return -1;
1435 p = mode_changeable;
1436 memset(mode_changeable, 0, inlen + 2);
1437 changeable_len = mode_sense_page(s, page, &p, 1);
1438 assert(changeable_len == len);
1440 /* Check that unchangeable bits are the same as what MODE SENSE
1441 * would return.
1442 */
1443 for (i = 2; i < len; i++) {
1444 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1445 return -1;
1448 return 0;
1451 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1453 switch (page) {
1454 case MODE_PAGE_CACHING:
1455 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1456 break;
1458 default:
1459 break;
1463 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1465 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1467 while (len > 0) {
1468 int page, subpage, page_len;
1470 /* Parse both possible formats for the mode page headers. */
1471 page = p[0] & 0x3f;
1472 if (p[0] & 0x40) {
1473 if (len < 4) {
1474 goto invalid_param_len;
1476 subpage = p[1];
1477 page_len = lduw_be_p(&p[2]);
1478 p += 4;
1479 len -= 4;
1480 } else {
1481 if (len < 2) {
1482 goto invalid_param_len;
1484 subpage = 0;
1485 page_len = p[1];
1486 p += 2;
1487 len -= 2;
1490 if (subpage) {
1491 goto invalid_param;
1493 if (page_len > len) {
1494 goto invalid_param_len;
1497 if (!change) {
1498 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1499 goto invalid_param;
1501 } else {
1502 scsi_disk_apply_mode_select(s, page, p);
1505 p += page_len;
1506 len -= page_len;
1508 return 0;
1510 invalid_param:
1511 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1512 return -1;
1514 invalid_param_len:
1515 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1516 return -1;
1519 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1521 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1522 uint8_t *p = inbuf;
1523 int cmd = r->req.cmd.buf[0];
1524 int len = r->req.cmd.xfer;
1525 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1526 int bd_len;
1527 int pass;
1529 /* We only support PF=1, SP=0. */
1530 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1531 goto invalid_field;
1534 if (len < hdr_len) {
1535 goto invalid_param_len;
1538 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1539 len -= hdr_len;
1540 p += hdr_len;
1541 if (len < bd_len) {
1542 goto invalid_param_len;
1544 if (bd_len != 0 && bd_len != 8) {
1545 goto invalid_param;
1548 len -= bd_len;
1549 p += bd_len;
1551 /* Ensure no change is made if there is an error! */
1552 for (pass = 0; pass < 2; pass++) {
1553 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1554 assert(pass == 0);
1555 return;
1558 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1559 /* The request is used as the AIO opaque value, so add a ref. */
1560 scsi_req_ref(&r->req);
1561 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1562 BLOCK_ACCT_FLUSH);
1563 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1564 return;
1567 scsi_req_complete(&r->req, GOOD);
1568 return;
1570 invalid_param:
1571 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1572 return;
1574 invalid_param_len:
1575 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1576 return;
1578 invalid_field:
1579 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1582 static inline bool check_lba_range(SCSIDiskState *s,
1583 uint64_t sector_num, uint32_t nb_sectors)
1584 {
1585 /*
1586 * The first line tests that no overflow happens when computing the last
1587 * sector. The second line tests that the last accessed sector is in
1588 * range.
1589 *
1590 * Careful, the computations should not underflow for nb_sectors == 0,
1591 * and a 0-block read to the first LBA beyond the end of device is
1592 * valid.
1593 */
1594 return (sector_num <= sector_num + nb_sectors &&
1595 sector_num + nb_sectors <= s->qdev.max_lba + 1);
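/*
 * Example for the range check: a device whose last LBA is 999
 * (s->qdev.max_lba == 999) accepts (sector_num=1000, nb_sectors=0) as a
 * zero-length transfer at the end of the medium, but rejects
 * (sector_num=999, nb_sectors=2).
 */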
1598 typedef struct UnmapCBData {
1599 SCSIDiskReq *r;
1600 uint8_t *inbuf;
1601 int count;
1602 } UnmapCBData;
1604 static void scsi_unmap_complete(void *opaque, int ret);
1606 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1608 SCSIDiskReq *r = data->r;
1609 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1610 uint64_t sector_num;
1611 uint32_t nb_sectors;
1613 assert(r->req.aiocb == NULL);
1614 if (scsi_disk_req_check_error(r, ret, false)) {
1615 goto done;
1618 if (data->count > 0) {
1619 sector_num = ldq_be_p(&data->inbuf[0]);
1620 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1621 if (!check_lba_range(s, sector_num, nb_sectors)) {
1622 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1623 goto done;
1626 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1627 sector_num * s->qdev.blocksize,
1628 nb_sectors * s->qdev.blocksize,
1629 scsi_unmap_complete, data);
1630 data->count--;
1631 data->inbuf += 16;
1632 return;
1635 scsi_req_complete(&r->req, GOOD);
1637 done:
1638 scsi_req_unref(&r->req);
1639 g_free(data);
1642 static void scsi_unmap_complete(void *opaque, int ret)
1644 UnmapCBData *data = opaque;
1645 SCSIDiskReq *r = data->r;
1646 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1648 assert(r->req.aiocb != NULL);
1649 r->req.aiocb = NULL;
1651 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1652 scsi_unmap_complete_noio(data, ret);
1653 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1656 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1658 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1659 uint8_t *p = inbuf;
1660 int len = r->req.cmd.xfer;
1661 UnmapCBData *data;
1663 /* Reject ANCHOR=1. */
1664 if (r->req.cmd.buf[1] & 0x1) {
1665 goto invalid_field;
1668 if (len < 8) {
1669 goto invalid_param_len;
1671 if (len < lduw_be_p(&p[0]) + 2) {
1672 goto invalid_param_len;
1674 if (len < lduw_be_p(&p[2]) + 8) {
1675 goto invalid_param_len;
1677 if (lduw_be_p(&p[2]) & 15) {
1678 goto invalid_param_len;
1681 if (blk_is_read_only(s->qdev.conf.blk)) {
1682 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1683 return;
1686 data = g_new0(UnmapCBData, 1);
1687 data->r = r;
1688 data->inbuf = &p[8];
1689 data->count = lduw_be_p(&p[2]) >> 4;
1691 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1692 scsi_req_ref(&r->req);
1693 scsi_unmap_complete_noio(data, 0);
1694 return;
1696 invalid_param_len:
1697 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1698 return;
1700 invalid_field:
1701 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1704 typedef struct WriteSameCBData {
1705 SCSIDiskReq *r;
1706 int64_t sector;
1707 int nb_sectors;
1708 QEMUIOVector qiov;
1709 struct iovec iov;
1710 } WriteSameCBData;
1712 static void scsi_write_same_complete(void *opaque, int ret)
1714 WriteSameCBData *data = opaque;
1715 SCSIDiskReq *r = data->r;
1716 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1718 assert(r->req.aiocb != NULL);
1719 r->req.aiocb = NULL;
1720 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1721 if (scsi_disk_req_check_error(r, ret, true)) {
1722 goto done;
1725 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1727 data->nb_sectors -= data->iov.iov_len / 512;
1728 data->sector += data->iov.iov_len / 512;
1729 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1730 if (data->iov.iov_len) {
1731 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1732 data->iov.iov_len, BLOCK_ACCT_WRITE);
1733 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1734 * where final qiov may need smaller size */
1735 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1736 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1737 data->sector << BDRV_SECTOR_BITS,
1738 &data->qiov, 0,
1739 scsi_write_same_complete, data);
1740 return;
1743 scsi_req_complete(&r->req, GOOD);
1745 done:
1746 scsi_req_unref(&r->req);
1747 qemu_vfree(data->iov.iov_base);
1748 g_free(data);
1749 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1752 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1754 SCSIRequest *req = &r->req;
1755 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1756 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1757 WriteSameCBData *data;
1758 uint8_t *buf;
1759 int i;
1761 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1762 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1763 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1764 return;
1767 if (blk_is_read_only(s->qdev.conf.blk)) {
1768 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1769 return;
1771 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1772 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1773 return;
1776 if (buffer_is_zero(inbuf, s->qdev.blocksize)) {
1777 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1779 /* The request is used as the AIO opaque value, so add a ref. */
1780 scsi_req_ref(&r->req);
1781 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1782 nb_sectors * s->qdev.blocksize,
1783 BLOCK_ACCT_WRITE);
1784 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1785 r->req.cmd.lba * s->qdev.blocksize,
1786 nb_sectors * s->qdev.blocksize,
1787 flags, scsi_aio_complete, r);
1788 return;
1791 data = g_new0(WriteSameCBData, 1);
1792 data->r = r;
1793 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1794 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1795 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1796 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1797 data->iov.iov_len);
1798 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1800 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1801 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1804 scsi_req_ref(&r->req);
1805 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1806 data->iov.iov_len, BLOCK_ACCT_WRITE);
1807 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1808 data->sector << BDRV_SECTOR_BITS,
1809 &data->qiov, 0,
1810 scsi_write_same_complete, data);
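/*
 * Sketch of the non-zero WRITE SAME path above: the pattern block is
 * replicated into a bounce buffer of at most SCSI_WRITE_SAME_MAX (512 KiB),
 * and scsi_write_same_complete keeps re-issuing writes until nb_sectors is
 * exhausted; e.g. a 4 MiB WRITE SAME is carried out as 8 passes of 512 KiB.
 */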
1813 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1815 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1817 if (r->iov.iov_len) {
1818 int buflen = r->iov.iov_len;
1819 DPRINTF("Write buf_len=%d\n", buflen);
1820 r->iov.iov_len = 0;
1821 scsi_req_data(&r->req, buflen);
1822 return;
1825 switch (req->cmd.buf[0]) {
1826 case MODE_SELECT:
1827 case MODE_SELECT_10:
1828 /* This also clears the sense buffer for REQUEST SENSE. */
1829 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1830 break;
1832 case UNMAP:
1833 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1834 break;
1836 case VERIFY_10:
1837 case VERIFY_12:
1838 case VERIFY_16:
1839 if (r->req.status == -1) {
1840 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1842 break;
1844 case WRITE_SAME_10:
1845 case WRITE_SAME_16:
1846 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1847 break;
1849 default:
1850 abort();
1854 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1856 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1857 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1858 uint64_t nb_sectors;
1859 uint8_t *outbuf;
1860 int buflen;
1862 switch (req->cmd.buf[0]) {
1863 case INQUIRY:
1864 case MODE_SENSE:
1865 case MODE_SENSE_10:
1866 case RESERVE:
1867 case RESERVE_10:
1868 case RELEASE:
1869 case RELEASE_10:
1870 case START_STOP:
1871 case ALLOW_MEDIUM_REMOVAL:
1872 case GET_CONFIGURATION:
1873 case GET_EVENT_STATUS_NOTIFICATION:
1874 case MECHANISM_STATUS:
1875 case REQUEST_SENSE:
1876 break;
1878 default:
1879 if (!blk_is_available(s->qdev.conf.blk)) {
1880 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1881 return 0;
1883 break;
1886 /*
1887 * FIXME: we shouldn't return anything bigger than 4k, but the code
1888 * requires the buffer to be as big as req->cmd.xfer in several
1889 * places. So, do not allow CDBs with a very large ALLOCATION
1890 * LENGTH. The real fix would be to modify scsi_read_data and
1891 * dma_buf_read, so that they return data beyond the buflen
1892 * as all zeros.
1893 */
1894 if (req->cmd.xfer > 65536) {
1895 goto illegal_request;
1897 r->buflen = MAX(4096, req->cmd.xfer);
1899 if (!r->iov.iov_base) {
1900 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1903 buflen = req->cmd.xfer;
1904 outbuf = r->iov.iov_base;
1905 memset(outbuf, 0, r->buflen);
1906 switch (req->cmd.buf[0]) {
1907 case TEST_UNIT_READY:
1908 assert(blk_is_available(s->qdev.conf.blk));
1909 break;
1910 case INQUIRY:
1911 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1912 if (buflen < 0) {
1913 goto illegal_request;
1915 break;
1916 case MODE_SENSE:
1917 case MODE_SENSE_10:
1918 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1919 if (buflen < 0) {
1920 goto illegal_request;
1922 break;
1923 case READ_TOC:
1924 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1925 if (buflen < 0) {
1926 goto illegal_request;
1928 break;
1929 case RESERVE:
1930 if (req->cmd.buf[1] & 1) {
1931 goto illegal_request;
1933 break;
1934 case RESERVE_10:
1935 if (req->cmd.buf[1] & 3) {
1936 goto illegal_request;
1938 break;
1939 case RELEASE:
1940 if (req->cmd.buf[1] & 1) {
1941 goto illegal_request;
1943 break;
1944 case RELEASE_10:
1945 if (req->cmd.buf[1] & 3) {
1946 goto illegal_request;
1948 break;
1949 case START_STOP:
1950 if (scsi_disk_emulate_start_stop(r) < 0) {
1951 return 0;
1953 break;
1954 case ALLOW_MEDIUM_REMOVAL:
1955 s->tray_locked = req->cmd.buf[4] & 1;
1956 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1957 break;
1958 case READ_CAPACITY_10:
1959 /* The normal LEN field for this command is zero. */
1960 memset(outbuf, 0, 8);
1961 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1962 if (!nb_sectors) {
1963 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1964 return 0;
1966 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1967 goto illegal_request;
1969 nb_sectors /= s->qdev.blocksize / 512;
1970 /* Returned value is the address of the last sector. */
1971 nb_sectors--;
1972 /* Remember the new size for read/write sanity checking. */
1973 s->qdev.max_lba = nb_sectors;
1974 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1975 if (nb_sectors > UINT32_MAX) {
1976 nb_sectors = UINT32_MAX;
1978 outbuf[0] = (nb_sectors >> 24) & 0xff;
1979 outbuf[1] = (nb_sectors >> 16) & 0xff;
1980 outbuf[2] = (nb_sectors >> 8) & 0xff;
1981 outbuf[3] = nb_sectors & 0xff;
1982 outbuf[4] = 0;
1983 outbuf[5] = 0;
1984 outbuf[6] = s->qdev.blocksize >> 8;
1985 outbuf[7] = 0;
1986 break;
1987 case REQUEST_SENSE:
1988 /* Just return "NO SENSE". */
1989 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
1990 (req->cmd.buf[1] & 1) == 0);
1991 if (buflen < 0) {
1992 goto illegal_request;
1994 break;
1995 case MECHANISM_STATUS:
1996 buflen = scsi_emulate_mechanism_status(s, outbuf);
1997 if (buflen < 0) {
1998 goto illegal_request;
2000 break;
2001 case GET_CONFIGURATION:
2002 buflen = scsi_get_configuration(s, outbuf);
2003 if (buflen < 0) {
2004 goto illegal_request;
2006 break;
2007 case GET_EVENT_STATUS_NOTIFICATION:
2008 buflen = scsi_get_event_status_notification(s, r, outbuf);
2009 if (buflen < 0) {
2010 goto illegal_request;
2012 break;
2013 case READ_DISC_INFORMATION:
2014 buflen = scsi_read_disc_information(s, r, outbuf);
2015 if (buflen < 0) {
2016 goto illegal_request;
2018 break;
2019 case READ_DVD_STRUCTURE:
2020 buflen = scsi_read_dvd_structure(s, r, outbuf);
2021 if (buflen < 0) {
2022 goto illegal_request;
2024 break;
2025 case SERVICE_ACTION_IN_16:
2026 /* Service Action In subcommands. */
2027 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2028 DPRINTF("SAI READ CAPACITY(16)\n");
2029 memset(outbuf, 0, req->cmd.xfer);
2030 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2031 if (!nb_sectors) {
2032 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2033 return 0;
2035 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2036 goto illegal_request;
2038 nb_sectors /= s->qdev.blocksize / 512;
2039 /* Returned value is the address of the last sector. */
2040 nb_sectors--;
2041 /* Remember the new size for read/write sanity checking. */
2042 s->qdev.max_lba = nb_sectors;
2043 outbuf[0] = (nb_sectors >> 56) & 0xff;
2044 outbuf[1] = (nb_sectors >> 48) & 0xff;
2045 outbuf[2] = (nb_sectors >> 40) & 0xff;
2046 outbuf[3] = (nb_sectors >> 32) & 0xff;
2047 outbuf[4] = (nb_sectors >> 24) & 0xff;
2048 outbuf[5] = (nb_sectors >> 16) & 0xff;
2049 outbuf[6] = (nb_sectors >> 8) & 0xff;
2050 outbuf[7] = nb_sectors & 0xff;
2051 outbuf[8] = 0;
2052 outbuf[9] = 0;
2053 outbuf[10] = s->qdev.blocksize >> 8;
2054 outbuf[11] = 0;
2055 outbuf[12] = 0;
2056 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2058 /* set TPE bit if the format supports discard */
2059 if (s->qdev.conf.discard_granularity) {
2060 outbuf[14] = 0x80;
2063 /* Protection, exponent and lowest lba field left blank. */
2064 break;
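/* Layout of the READ CAPACITY(16) data built above: bytes 0-7 hold the last
 * LBA and bytes 8-11 the block length (big-endian); the low nibble of byte 13
 * is the logical-blocks-per-physical-block exponent.  Setting bit 7 of byte 14
 * (0x80) advertises thin provisioning -- called TPE here, LBPME in later SBC
 * revisions -- so guests may issue UNMAP when a discard granularity is
 * configured.
 */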
2066 DPRINTF("Unsupported Service Action In\n");
2067 goto illegal_request;
2068 case SYNCHRONIZE_CACHE:
2069 /* The request is used as the AIO opaque value, so add a ref. */
2070 scsi_req_ref(&r->req);
2071 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2072 BLOCK_ACCT_FLUSH);
2073 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2074 return 0;
2075 case SEEK_10:
2076 DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
2077 if (r->req.cmd.lba > s->qdev.max_lba) {
2078 goto illegal_lba;
2080 break;
2081 case MODE_SELECT:
2082 DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2083 break;
2084 case MODE_SELECT_10:
2085 DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2086 break;
2087 case UNMAP:
2088 DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2089 break;
2090 case VERIFY_10:
2091 case VERIFY_12:
2092 case VERIFY_16:
2093 DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
2094 if (req->cmd.buf[1] & 6) {
2095 goto illegal_request;
2097 break;
2098 case WRITE_SAME_10:
2099 case WRITE_SAME_16:
2100 DPRINTF("WRITE SAME %d (len %lu)\n",
2101 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
2102 (unsigned long)r->req.cmd.xfer);
2103 break;
2104 default:
2105 DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
2106 scsi_command_name(buf[0]));
2107 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2108 return 0;
2110 assert(!r->req.aiocb);
2111 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2112 if (r->iov.iov_len == 0) {
2113 scsi_req_complete(&r->req, GOOD);
2115 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2116 assert(r->iov.iov_len == req->cmd.xfer);
2117 return -r->iov.iov_len;
2118 } else {
2119 return r->iov.iov_len;
2122 illegal_request:
2123 if (r->req.status == -1) {
2124 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2126 return 0;
2128 illegal_lba:
2129 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2130 return 0;
2133 /* Execute a SCSI command.  Returns the length of the data expected by the
2134 command.  This will be positive for data transfers from the device
2135 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2136 and zero if the command does not transfer any data. */
2138 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2140 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2141 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2142 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2143 uint32_t len;
2144 uint8_t command;
2146 command = buf[0];
2148 if (!blk_is_available(s->qdev.conf.blk)) {
2149 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2150 return 0;
2153 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2154 switch (command) {
2155 case READ_6:
2156 case READ_10:
2157 case READ_12:
2158 case READ_16:
2159 DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
2160 if (r->req.cmd.buf[1] & 0xe0) {
2161 goto illegal_request;
2163 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2164 goto illegal_lba;
2166 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2167 r->sector_count = len * (s->qdev.blocksize / 512);
2168 break;
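/* Illustrative arithmetic for the conversion above: r->sector and
 * r->sector_count are in 512-byte qemu sectors, so with 4096-byte logical
 * blocks a READ starting at LBA 100 for 8 blocks becomes sector 800 with a
 * sector_count of 64 (32 KiB).  With 512-byte logical blocks the factor is 1
 * and the values pass through unchanged.
 */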
2169 case WRITE_6:
2170 case WRITE_10:
2171 case WRITE_12:
2172 case WRITE_16:
2173 case WRITE_VERIFY_10:
2174 case WRITE_VERIFY_12:
2175 case WRITE_VERIFY_16:
2176 if (blk_is_read_only(s->qdev.conf.blk)) {
2177 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2178 return 0;
2180 DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
2181 (command & 0xe) == 0xe ? "And Verify " : "",
2182 r->req.cmd.lba, len);
2183 /* fall through */
2184 case VERIFY_10:
2185 case VERIFY_12:
2186 case VERIFY_16:
2187 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2188 * As far as DMA is concerned, we can treat it the same as a write;
2189 * scsi_block_do_sgio will send VERIFY commands.
2190 */
2191 if (r->req.cmd.buf[1] & 0xe0) {
2192 goto illegal_request;
2194 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2195 goto illegal_lba;
2197 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2198 r->sector_count = len * (s->qdev.blocksize / 512);
2199 break;
2200 default:
2201 abort();
2202 illegal_request:
2203 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2204 return 0;
2205 illegal_lba:
2206 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2207 return 0;
2209 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2210 if (r->sector_count == 0) {
2211 scsi_req_complete(&r->req, GOOD);
2213 assert(r->iov.iov_len == 0);
2214 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2215 return -r->sector_count * 512;
2216 } else {
2217 return r->sector_count * 512;
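/* Example of the return-value convention used here: a READ(10) with a transfer
 * length of 8 on a 4096-byte-block disk yields sector_count = 64 and the
 * function returns 32768, the number of bytes the device will produce; the
 * same transfer as a WRITE(10) returns -32768, i.e. bytes expected from the
 * initiator.
 */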
2221 static void scsi_disk_reset(DeviceState *dev)
2223 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2224 uint64_t nb_sectors;
2226 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2228 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2229 nb_sectors /= s->qdev.blocksize / 512;
2230 if (nb_sectors) {
2231 nb_sectors--;
2233 s->qdev.max_lba = nb_sectors;
2234 /* reset tray statuses */
2235 s->tray_locked = 0;
2236 s->tray_open = 0;
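/* Note on the reset path above: max_lba is recomputed from the current backend
 * geometry (in logical-block units), so a backend that was resized keeps a
 * consistent limit after the reset, and the tray is closed and unlocked.  For
 * example, a 1 GiB image with 512-byte blocks ends up with max_lba = 2097151.
 */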
2239 static void scsi_disk_resize_cb(void *opaque)
2241 SCSIDiskState *s = opaque;
2243 /* SPC lists this sense code as available only for
2244 * direct-access devices.
2245 */
2246 if (s->qdev.type == TYPE_DISK) {
2247 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2251 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2253 SCSIDiskState *s = opaque;
2255 /*
2256 * When a CD gets changed, we have to report an ejected state and
2257 * then a loaded state to guests so that they detect tray
2258 * open/close and media change events. Guests that do not use
2259 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2260 * states rely on this behavior.
2261 *
2262 * media_changed governs the state machine used for unit attention
2263 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2264 */
2265 s->media_changed = load;
2266 s->tray_open = !load;
2267 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2268 s->media_event = true;
2269 s->eject_request = false;
2272 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2274 SCSIDiskState *s = opaque;
2276 s->eject_request = true;
2277 if (force) {
2278 s->tray_locked = false;
2282 static bool scsi_cd_is_tray_open(void *opaque)
2284 return ((SCSIDiskState *)opaque)->tray_open;
2287 static bool scsi_cd_is_medium_locked(void *opaque)
2289 return ((SCSIDiskState *)opaque)->tray_locked;
2292 static const BlockDevOps scsi_disk_removable_block_ops = {
2293 .change_media_cb = scsi_cd_change_media_cb,
2294 .eject_request_cb = scsi_cd_eject_request_cb,
2295 .is_tray_open = scsi_cd_is_tray_open,
2296 .is_medium_locked = scsi_cd_is_medium_locked,
2298 .resize_cb = scsi_disk_resize_cb,
2301 static const BlockDevOps scsi_disk_block_ops = {
2302 .resize_cb = scsi_disk_resize_cb,
2305 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2307 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2308 if (s->media_changed) {
2309 s->media_changed = false;
2310 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
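/* The media change handling is a two-step unit attention sequence: the
 * change_media_cb above first reports UNIT_ATTENTION_NO_MEDIUM and records
 * media_changed; once the guest has consumed that condition, this hook
 * replaces it with MEDIUM_CHANGED, so the guest observes both the eject and
 * the newly loaded medium.
 */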
2314 static void scsi_realize(SCSIDevice *dev, Error **errp)
2316 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2317 Error *err = NULL;
2319 if (!s->qdev.conf.blk) {
2320 error_setg(errp, "drive property not set");
2321 return;
2324 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2325 !blk_is_inserted(s->qdev.conf.blk)) {
2326 error_setg(errp, "Device needs media, but drive is empty");
2327 return;
2330 blkconf_serial(&s->qdev.conf, &s->serial);
2331 blkconf_blocksizes(&s->qdev.conf);
2332 if (dev->type == TYPE_DISK) {
2333 blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
2334 if (err) {
2335 error_propagate(errp, err);
2336 return;
2339 blkconf_apply_backend_options(&dev->conf,
2340 blk_is_read_only(s->qdev.conf.blk),
2341 dev->type == TYPE_DISK, &err);
2342 if (err) {
2343 error_propagate(errp, err);
2344 return;
2347 if (s->qdev.conf.discard_granularity == -1) {
2348 s->qdev.conf.discard_granularity =
2349 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
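/* Example of the default above: with the usual 512-byte logical blocks the
 * discard granularity becomes DEFAULT_DISCARD_GRANULARITY (4096 bytes); with
 * 8 KiB logical blocks it follows the block size (8192), since the larger of
 * the two is used whenever the property was left at its "auto" value of -1.
 */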
2352 if (!s->version) {
2353 s->version = g_strdup(qemu_hw_version());
2355 if (!s->vendor) {
2356 s->vendor = g_strdup("QEMU");
2359 if (blk_is_sg(s->qdev.conf.blk)) {
2360 error_setg(errp, "unwanted /dev/sg*");
2361 return;
2364 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2365 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2366 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2367 } else {
2368 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2370 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2372 blk_iostatus_enable(s->qdev.conf.blk);
2375 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2377 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2378 /* This can happen for devices without a drive.  The error message for a
2379 * missing backend will be issued in scsi_realize.
2380 */
2381 if (s->qdev.conf.blk) {
2382 blkconf_blocksizes(&s->qdev.conf);
2383 }
2384 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2385 s->qdev.type = TYPE_DISK;
2386 if (!s->product) {
2387 s->product = g_strdup("QEMU HARDDISK");
2389 scsi_realize(&s->qdev, errp);
2392 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2394 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2395 int ret;
2397 if (!dev->conf.blk) {
2398 /* Anonymous BlockBackend for an empty drive. As we put it into
2399 * dev->conf, qdev takes care of detaching on unplug. */
2400 dev->conf.blk = blk_new(0, BLK_PERM_ALL);
2401 ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
2402 assert(ret == 0);
2405 s->qdev.blocksize = 2048;
2406 s->qdev.type = TYPE_ROM;
2407 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2408 if (!s->product) {
2409 s->product = g_strdup("QEMU CD-ROM");
2411 scsi_realize(&s->qdev, errp);
2414 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2416 DriveInfo *dinfo;
2417 Error *local_err = NULL;
2419 if (!dev->conf.blk) {
2420 scsi_realize(dev, &local_err);
2421 assert(local_err);
2422 error_propagate(errp, local_err);
2423 return;
2426 dinfo = blk_legacy_dinfo(dev->conf.blk);
2427 if (dinfo && dinfo->media_cd) {
2428 scsi_cd_realize(dev, errp);
2429 } else {
2430 scsi_hd_realize(dev, errp);
2434 static const SCSIReqOps scsi_disk_emulate_reqops = {
2435 .size = sizeof(SCSIDiskReq),
2436 .free_req = scsi_free_request,
2437 .send_command = scsi_disk_emulate_command,
2438 .read_data = scsi_disk_emulate_read_data,
2439 .write_data = scsi_disk_emulate_write_data,
2440 .get_buf = scsi_get_buf,
2443 static const SCSIReqOps scsi_disk_dma_reqops = {
2444 .size = sizeof(SCSIDiskReq),
2445 .free_req = scsi_free_request,
2446 .send_command = scsi_disk_dma_command,
2447 .read_data = scsi_read_data,
2448 .write_data = scsi_write_data,
2449 .get_buf = scsi_get_buf,
2450 .load_request = scsi_disk_load_request,
2451 .save_request = scsi_disk_save_request,
2454 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2455 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2456 [INQUIRY] = &scsi_disk_emulate_reqops,
2457 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2458 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2459 [START_STOP] = &scsi_disk_emulate_reqops,
2460 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2461 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2462 [READ_TOC] = &scsi_disk_emulate_reqops,
2463 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2464 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2465 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2466 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2467 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2468 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2469 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2470 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2471 [SEEK_10] = &scsi_disk_emulate_reqops,
2472 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2473 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2474 [UNMAP] = &scsi_disk_emulate_reqops,
2475 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2476 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2477 [VERIFY_10] = &scsi_disk_emulate_reqops,
2478 [VERIFY_12] = &scsi_disk_emulate_reqops,
2479 [VERIFY_16] = &scsi_disk_emulate_reqops,
2481 [READ_6] = &scsi_disk_dma_reqops,
2482 [READ_10] = &scsi_disk_dma_reqops,
2483 [READ_12] = &scsi_disk_dma_reqops,
2484 [READ_16] = &scsi_disk_dma_reqops,
2485 [WRITE_6] = &scsi_disk_dma_reqops,
2486 [WRITE_10] = &scsi_disk_dma_reqops,
2487 [WRITE_12] = &scsi_disk_dma_reqops,
2488 [WRITE_16] = &scsi_disk_dma_reqops,
2489 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2490 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2491 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
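/* Dispatch note: opcodes without an entry in this table yield a NULL pointer,
 * and scsi_new_request() below falls back to scsi_disk_emulate_reqops for
 * them, so an unknown opcode ends up in the default branch of
 * scsi_disk_emulate_command() and is failed with INVALID_OPCODE.
 */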
2494 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2495 uint8_t *buf, void *hba_private)
2497 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2498 SCSIRequest *req;
2499 const SCSIReqOps *ops;
2500 uint8_t command;
2502 command = buf[0];
2503 ops = scsi_disk_reqops_dispatch[command];
2504 if (!ops) {
2505 ops = &scsi_disk_emulate_reqops;
2507 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2509 #ifdef DEBUG_SCSI
2510 DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
2512 int i;
2513 for (i = 1; i < scsi_cdb_length(buf); i++) {
2514 printf(" 0x%02x", buf[i]);
2516 printf("\n");
2518 #endif
2520 return req;
2523 #ifdef __linux__
2524 static int get_device_type(SCSIDiskState *s)
2526 uint8_t cmd[16];
2527 uint8_t buf[36];
2528 uint8_t sensebuf[8];
2529 sg_io_hdr_t io_header;
2530 int ret;
2532 memset(cmd, 0, sizeof(cmd));
2533 memset(buf, 0, sizeof(buf));
2534 cmd[0] = INQUIRY;
2535 cmd[4] = sizeof(buf);
2537 memset(&io_header, 0, sizeof(io_header));
2538 io_header.interface_id = 'S';
2539 io_header.dxfer_direction = SG_DXFER_FROM_DEV;
2540 io_header.dxfer_len = sizeof(buf);
2541 io_header.dxferp = buf;
2542 io_header.cmdp = cmd;
2543 io_header.cmd_len = sizeof(cmd);
2544 io_header.mx_sb_len = sizeof(sensebuf);
2545 io_header.sbp = sensebuf;
2546 io_header.timeout = 6000; /* XXX */
2548 ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
2549 if (ret < 0 || io_header.driver_status || io_header.host_status) {
2550 return -1;
2552 s->qdev.type = buf[0];
2553 if (buf[1] & 0x80) {
2554 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2556 return 0;
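/* The INQUIRY parsing above follows the standard INQUIRY data format: byte 0
 * carries the peripheral device type (e.g. 0x00 disk, 0x05 CD/DVD) and bit 7
 * of byte 1 is the RMB flag, which is what marks the passthrough device as
 * removable.
 */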
2559 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2561 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2562 int sg_version;
2563 int rc;
2565 if (!s->qdev.conf.blk) {
2566 error_setg(errp, "drive property not set");
2567 return;
2570 /* check we are using a driver managing SG_IO (version 3 and after) */
2571 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2572 if (rc < 0) {
2573 error_setg(errp, "cannot get SG_IO version number: %s. "
2574 "Is this a SCSI device?",
2575 strerror(-rc));
2576 return;
2578 if (sg_version < 30000) {
2579 error_setg(errp, "scsi generic interface too old");
2580 return;
2583 /* get device type from INQUIRY data */
2584 rc = get_device_type(s);
2585 if (rc < 0) {
2586 error_setg(errp, "INQUIRY failed");
2587 return;
2590 /* Make a guess for the block size; we'll fix it when the guest sends
2591 * READ CAPACITY.  If they don't, they would likely assume these sizes
2592 * anyway.  (TODO: check in /sys.)
2593 */
2594 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2595 s->qdev.blocksize = 2048;
2596 } else {
2597 s->qdev.blocksize = 512;
2600 /* Mark the scsi-block device as non-removable so that it cannot be ejected
2601 * with the HMP and QMP eject commands.
2602 */
2603 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2605 scsi_realize(&s->qdev, errp);
2606 scsi_generic_read_device_identification(&s->qdev);
2609 typedef struct SCSIBlockReq {
2610 SCSIDiskReq req;
2611 sg_io_hdr_t io_header;
2613 /* Selected bytes of the original CDB, copied into our own CDB. */
2614 uint8_t cmd, cdb1, group_number;
2616 /* CDB passed to SG_IO. */
2617 uint8_t cdb[16];
2618 } SCSIBlockReq;
2620 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2621 int64_t offset, QEMUIOVector *iov,
2622 int direction,
2623 BlockCompletionFunc *cb, void *opaque)
2625 sg_io_hdr_t *io_header = &req->io_header;
2626 SCSIDiskReq *r = &req->req;
2627 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2628 int nb_logical_blocks;
2629 uint64_t lba;
2630 BlockAIOCB *aiocb;
2632 /* This is not supported yet.  It can only happen if the guest does
2633 * reads and writes that are not aligned to the logical block size
2634 * _and_ cover multiple MemoryRegions.
2635 */
2636 assert(offset % s->qdev.blocksize == 0);
2637 assert(iov->size % s->qdev.blocksize == 0);
2639 io_header->interface_id = 'S';
2641 /* The data transfer comes from the QEMUIOVector. */
2642 io_header->dxfer_direction = direction;
2643 io_header->dxfer_len = iov->size;
2644 io_header->dxferp = (void *)iov->iov;
2645 io_header->iovec_count = iov->niov;
2646 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2648 /* Build a new CDB with the LBA and length patched in, in case
2649 * DMA helpers split the transfer in multiple segments. Do not
2650 * build a CDB smaller than what the guest wanted, and only build
2651 * a larger one if strictly necessary.
2652 */
2653 io_header->cmdp = req->cdb;
2654 lba = offset / s->qdev.blocksize;
2655 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2657 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2658 /* 6-byte CDB */
2659 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2660 req->cdb[4] = nb_logical_blocks;
2661 req->cdb[5] = 0;
2662 io_header->cmd_len = 6;
2663 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2664 /* 10-byte CDB */
2665 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2666 req->cdb[1] = req->cdb1;
2667 stl_be_p(&req->cdb[2], lba);
2668 req->cdb[6] = req->group_number;
2669 stw_be_p(&req->cdb[7], nb_logical_blocks);
2670 req->cdb[9] = 0;
2671 io_header->cmd_len = 10;
2672 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2673 /* 12-byte CDB */
2674 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2675 req->cdb[1] = req->cdb1;
2676 stl_be_p(&req->cdb[2], lba);
2677 stl_be_p(&req->cdb[6], nb_logical_blocks);
2678 req->cdb[10] = req->group_number;
2679 req->cdb[11] = 0;
2680 io_header->cmd_len = 12;
2681 } else {
2682 /* 16-byte CDB */
2683 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2684 req->cdb[1] = req->cdb1;
2685 stq_be_p(&req->cdb[2], lba);
2686 stl_be_p(&req->cdb[10], nb_logical_blocks);
2687 req->cdb[14] = req->group_number;
2688 req->cdb[15] = 0;
2689 io_header->cmd_len = 16;
2692 /* The rest is as in scsi-generic.c. */
2693 io_header->mx_sb_len = sizeof(r->req.sense);
2694 io_header->sbp = r->req.sense;
2695 io_header->timeout = UINT_MAX;
2696 io_header->usr_ptr = r;
2697 io_header->flags |= SG_FLAG_DIRECT_IO;
2699 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2700 assert(aiocb != NULL);
2701 return aiocb;
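/* Worked example for the CDB rebuilding above, assuming a guest READ(10) that
 * the DMA helpers split into fragments: a fragment at LBA 0x1000 for 16
 * logical blocks is sent as the 10-byte CDB 28 xx 00 00 10 00 xx 00 10 00
 * (opcode 0x28 = (0x28 & 0x1f) | 0x20, big-endian LBA in bytes 2-5, length in
 * bytes 7-8), while a fragment whose LBA no longer fits in 32 bits would be
 * promoted to the 16-byte form.  The original group is never shrunk; e.g. a
 * READ(16) always stays a 16-byte CDB because its group code is 4.
 */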
2704 static bool scsi_block_no_fua(SCSICommand *cmd)
2706 return false;
2709 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2710 QEMUIOVector *iov,
2711 BlockCompletionFunc *cb, void *cb_opaque,
2712 void *opaque)
2714 SCSIBlockReq *r = opaque;
2715 return scsi_block_do_sgio(r, offset, iov,
2716 SG_DXFER_FROM_DEV, cb, cb_opaque);
2719 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2720 QEMUIOVector *iov,
2721 BlockCompletionFunc *cb, void *cb_opaque,
2722 void *opaque)
2724 SCSIBlockReq *r = opaque;
2725 return scsi_block_do_sgio(r, offset, iov,
2726 SG_DXFER_TO_DEV, cb, cb_opaque);
2729 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2731 switch (buf[0]) {
2732 case VERIFY_10:
2733 case VERIFY_12:
2734 case VERIFY_16:
2735 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2736 * for the number of logical blocks specified in the length
2737 * field). For other modes, do not use scatter/gather operation.
2738 */
2739 if ((buf[1] & 6) == 2) {
2740 return false;
2742 break;
2744 case READ_6:
2745 case READ_10:
2746 case READ_12:
2747 case READ_16:
2748 case WRITE_6:
2749 case WRITE_10:
2750 case WRITE_12:
2751 case WRITE_16:
2752 case WRITE_VERIFY_10:
2753 case WRITE_VERIFY_12:
2754 case WRITE_VERIFY_16:
2755 /* MMC writing cannot be done via DMA helpers, because it sometimes
2756 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2757 * We might use scsi_block_dma_reqops as long as no writing commands are
2758 * seen, but performance usually isn't paramount on optical media. So,
2759 * just make scsi-block operate the same as scsi-generic for them.
2760 */
2761 if (s->qdev.type != TYPE_ROM) {
2762 return false;
2764 break;
2766 default:
2767 break;
2770 return true;
2774 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2776 SCSIBlockReq *r = (SCSIBlockReq *)req;
2777 r->cmd = req->cmd.buf[0];
2778 switch (r->cmd >> 5) {
2779 case 0:
2780 /* 6-byte CDB. */
2781 r->cdb1 = r->group_number = 0;
2782 break;
2783 case 1:
2784 /* 10-byte CDB. */
2785 r->cdb1 = req->cmd.buf[1];
2786 r->group_number = req->cmd.buf[6];
2787 break;
2788 case 4:
2789 /* 16-byte CDB (group 4, e.g. READ(16)): group number is in byte 14. */
2790 r->cdb1 = req->cmd.buf[1];
2791 r->group_number = req->cmd.buf[14];
2792 break;
2793 case 5:
2794 /* 12-byte CDB (group 5, e.g. READ(12)): group number is in byte 10. */
2795 r->cdb1 = req->cmd.buf[1];
2796 r->group_number = req->cmd.buf[10];
2797 break;
2798 default:
2799 abort();
2802 if (r->cdb1 & 0xe0) {
2803 /* Protection information is not supported. */
2804 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2805 return 0;
2808 r->req.status = &r->io_header.status;
2809 return scsi_disk_dma_command(req, buf);
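/* The switch above keys on the SCSI command group code (opcode >> 5): group 0
 * opcodes use 6-byte CDBs, groups 1 and 2 use 10-byte CDBs, group 4 uses
 * 16-byte CDBs and group 5 uses 12-byte CDBs.  For instance READ(16) is 0x88,
 * so 0x88 >> 5 = 4, and its group number field sits in byte 14 of the CDB.
 */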
2812 static const SCSIReqOps scsi_block_dma_reqops = {
2813 .size = sizeof(SCSIBlockReq),
2814 .free_req = scsi_free_request,
2815 .send_command = scsi_block_dma_command,
2816 .read_data = scsi_read_data,
2817 .write_data = scsi_write_data,
2818 .get_buf = scsi_get_buf,
2819 .load_request = scsi_disk_load_request,
2820 .save_request = scsi_disk_save_request,
2823 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2824 uint32_t lun, uint8_t *buf,
2825 void *hba_private)
2827 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2829 if (scsi_block_is_passthrough(s, buf)) {
2830 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2831 hba_private);
2832 } else {
2833 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2834 hba_private);
2838 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2839 uint8_t *buf, void *hba_private)
2841 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2843 if (scsi_block_is_passthrough(s, buf)) {
2844 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2845 } else {
2846 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2850 #endif
2852 static
2853 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2854 BlockCompletionFunc *cb, void *cb_opaque,
2855 void *opaque)
2857 SCSIDiskReq *r = opaque;
2858 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2859 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2862 static
2863 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2864 BlockCompletionFunc *cb, void *cb_opaque,
2865 void *opaque)
2867 SCSIDiskReq *r = opaque;
2868 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2869 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2872 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2874 DeviceClass *dc = DEVICE_CLASS(klass);
2875 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2877 dc->fw_name = "disk";
2878 dc->reset = scsi_disk_reset;
2879 sdc->dma_readv = scsi_dma_readv;
2880 sdc->dma_writev = scsi_dma_writev;
2881 sdc->need_fua_emulation = scsi_is_cmd_fua;
2884 static const TypeInfo scsi_disk_base_info = {
2885 .name = TYPE_SCSI_DISK_BASE,
2886 .parent = TYPE_SCSI_DEVICE,
2887 .class_init = scsi_disk_base_class_initfn,
2888 .instance_size = sizeof(SCSIDiskState),
2889 .class_size = sizeof(SCSIDiskClass),
2890 .abstract = true,
2893 #define DEFINE_SCSI_DISK_PROPERTIES() \
2894 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
2895 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2896 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2897 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2898 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2899 DEFINE_PROP_STRING("product", SCSIDiskState, product)
2901 static Property scsi_hd_properties[] = {
2902 DEFINE_SCSI_DISK_PROPERTIES(),
2903 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2904 SCSI_DISK_F_REMOVABLE, false),
2905 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2906 SCSI_DISK_F_DPOFUA, false),
2907 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2908 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2909 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2910 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2911 DEFAULT_MAX_UNMAP_SIZE),
2912 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2913 DEFAULT_MAX_IO_SIZE),
2914 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
2915 DEFINE_PROP_END_OF_LIST(),
2918 static const VMStateDescription vmstate_scsi_disk_state = {
2919 .name = "scsi-disk",
2920 .version_id = 1,
2921 .minimum_version_id = 1,
2922 .fields = (VMStateField[]) {
2923 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
2924 VMSTATE_BOOL(media_changed, SCSIDiskState),
2925 VMSTATE_BOOL(media_event, SCSIDiskState),
2926 VMSTATE_BOOL(eject_request, SCSIDiskState),
2927 VMSTATE_BOOL(tray_open, SCSIDiskState),
2928 VMSTATE_BOOL(tray_locked, SCSIDiskState),
2929 VMSTATE_END_OF_LIST()
2933 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
2935 DeviceClass *dc = DEVICE_CLASS(klass);
2936 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2938 sc->realize = scsi_hd_realize;
2939 sc->alloc_req = scsi_new_request;
2940 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2941 dc->desc = "virtual SCSI disk";
2942 dc->props = scsi_hd_properties;
2943 dc->vmsd = &vmstate_scsi_disk_state;
2946 static const TypeInfo scsi_hd_info = {
2947 .name = "scsi-hd",
2948 .parent = TYPE_SCSI_DISK_BASE,
2949 .class_init = scsi_hd_class_initfn,
2952 static Property scsi_cd_properties[] = {
2953 DEFINE_SCSI_DISK_PROPERTIES(),
2954 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2955 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2956 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2957 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2958 DEFAULT_MAX_IO_SIZE),
2959 DEFINE_PROP_END_OF_LIST(),
2962 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
2964 DeviceClass *dc = DEVICE_CLASS(klass);
2965 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2967 sc->realize = scsi_cd_realize;
2968 sc->alloc_req = scsi_new_request;
2969 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2970 dc->desc = "virtual SCSI CD-ROM";
2971 dc->props = scsi_cd_properties;
2972 dc->vmsd = &vmstate_scsi_disk_state;
2975 static const TypeInfo scsi_cd_info = {
2976 .name = "scsi-cd",
2977 .parent = TYPE_SCSI_DISK_BASE,
2978 .class_init = scsi_cd_class_initfn,
2981 #ifdef __linux__
2982 static Property scsi_block_properties[] = {
2983 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
2984 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
2985 DEFINE_PROP_END_OF_LIST(),
2988 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
2990 DeviceClass *dc = DEVICE_CLASS(klass);
2991 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2992 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2994 sc->realize = scsi_block_realize;
2995 sc->alloc_req = scsi_block_new_request;
2996 sc->parse_cdb = scsi_block_parse_cdb;
2997 sdc->dma_readv = scsi_block_dma_readv;
2998 sdc->dma_writev = scsi_block_dma_writev;
2999 sdc->need_fua_emulation = scsi_block_no_fua;
3000 dc->desc = "SCSI block device passthrough";
3001 dc->props = scsi_block_properties;
3002 dc->vmsd = &vmstate_scsi_disk_state;
3005 static const TypeInfo scsi_block_info = {
3006 .name = "scsi-block",
3007 .parent = TYPE_SCSI_DISK_BASE,
3008 .class_init = scsi_block_class_initfn,
3010 #endif
3012 static Property scsi_disk_properties[] = {
3013 DEFINE_SCSI_DISK_PROPERTIES(),
3014 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3015 SCSI_DISK_F_REMOVABLE, false),
3016 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3017 SCSI_DISK_F_DPOFUA, false),
3018 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3019 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3020 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3021 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3022 DEFAULT_MAX_UNMAP_SIZE),
3023 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3024 DEFAULT_MAX_IO_SIZE),
3025 DEFINE_PROP_END_OF_LIST(),
3028 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
3030 DeviceClass *dc = DEVICE_CLASS(klass);
3031 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3033 sc->realize = scsi_disk_realize;
3034 sc->alloc_req = scsi_new_request;
3035 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3036 dc->fw_name = "disk";
3037 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
3038 dc->reset = scsi_disk_reset;
3039 dc->props = scsi_disk_properties;
3040 dc->vmsd = &vmstate_scsi_disk_state;
3043 static const TypeInfo scsi_disk_info = {
3044 .name = "scsi-disk",
3045 .parent = TYPE_SCSI_DISK_BASE,
3046 .class_init = scsi_disk_class_initfn,
3049 static void scsi_disk_register_types(void)
3051 type_register_static(&scsi_disk_base_info);
3052 type_register_static(&scsi_hd_info);
3053 type_register_static(&scsi_cd_info);
3054 #ifdef __linux__
3055 type_register_static(&scsi_block_info);
3056 #endif
3057 type_register_static(&scsi_disk_info);
3060 type_init(scsi_disk_register_types)