hw/scsi/scsi-disk.c
1 /*
2 * SCSI Device emulation
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
15 * This code is licensed under the LGPL.
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "hw/scsi/scsi.h"
27 #include "hw/scsi/emulation.h"
28 #include "scsi/constants.h"
29 #include "sysemu/sysemu.h"
30 #include "sysemu/block-backend.h"
31 #include "sysemu/blockdev.h"
32 #include "hw/block/block.h"
33 #include "sysemu/dma.h"
34 #include "qemu/cutils.h"
35 #include "trace.h"
37 #ifdef __linux__
38 #include <scsi/sg.h>
39 #endif
41 #define SCSI_WRITE_SAME_MAX (512 * KiB)
42 #define SCSI_DMA_BUF_SIZE (128 * KiB)
43 #define SCSI_MAX_INQUIRY_LEN 256
44 #define SCSI_MAX_MODE_LEN 256
46 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
47 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
48 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GiB - 1 byte */
50 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
52 #define SCSI_DISK_BASE(obj) \
53 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
54 #define SCSI_DISK_BASE_CLASS(klass) \
55 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
56 #define SCSI_DISK_BASE_GET_CLASS(obj) \
57 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
59 typedef struct SCSIDiskClass {
60 SCSIDeviceClass parent_class;
61 DMAIOFunc *dma_readv;
62 DMAIOFunc *dma_writev;
63 bool (*need_fua_emulation)(SCSICommand *cmd);
64 } SCSIDiskClass;
66 typedef struct SCSIDiskReq {
67 SCSIRequest req;
68 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
69 uint64_t sector;
70 uint32_t sector_count;
71 uint32_t buflen;
72 bool started;
73 bool need_fua_emulation;
74 struct iovec iov;
75 QEMUIOVector qiov;
76 BlockAcctCookie acct;
77 unsigned char *status;
78 } SCSIDiskReq;
80 #define SCSI_DISK_F_REMOVABLE 0
81 #define SCSI_DISK_F_DPOFUA 1
82 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
84 typedef struct SCSIDiskState
86 SCSIDevice qdev;
87 uint32_t features;
88 bool media_changed;
89 bool media_event;
90 bool eject_request;
91 uint16_t port_index;
92 uint64_t max_unmap_size;
93 uint64_t max_io_size;
94 QEMUBH *bh;
95 char *version;
96 char *serial;
97 char *vendor;
98 char *product;
99 char *device_id;
100 bool tray_open;
101 bool tray_locked;
103 * 0x0000 - rotation rate not reported
104 * 0x0001 - non-rotating medium (SSD)
105 * 0x0002-0x0400 - reserved
106 * 0x0401-0xfffe - rotations per minute
107 * 0xffff - reserved
109 uint16_t rotation_rate;
110 } SCSIDiskState;
112 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
114 static void scsi_free_request(SCSIRequest *req)
116 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
118 qemu_vfree(r->iov.iov_base);
121 /* Helper function for command completion with sense. */
122 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
124 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
125 sense.ascq);
126 scsi_req_build_sense(&r->req, sense);
127 scsi_req_complete(&r->req, CHECK_CONDITION);
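/* Lazily allocate the bounce buffer and size the iovec so it covers at most
 * buflen bytes of the sectors still left in the current transfer. */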
130 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
132 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
134 if (!r->iov.iov_base) {
135 r->buflen = size;
136 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
138 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
139 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
142 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
144 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
146 qemu_put_be64s(f, &r->sector);
147 qemu_put_be32s(f, &r->sector_count);
148 qemu_put_be32s(f, &r->buflen);
149 if (r->buflen) {
150 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
151 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
152 } else if (!req->retry) {
153 uint32_t len = r->iov.iov_len;
154 qemu_put_be32s(f, &len);
155 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
160 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
162 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
164 qemu_get_be64s(f, &r->sector);
165 qemu_get_be32s(f, &r->sector_count);
166 qemu_get_be32s(f, &r->buflen);
167 if (r->buflen) {
168 scsi_init_iovec(r, r->buflen);
169 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
170 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
171 } else if (!r->req.retry) {
172 uint32_t len;
173 qemu_get_be32s(f, &len);
174 r->iov.iov_len = len;
175 assert(r->iov.iov_len <= r->buflen);
176 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
180 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
183 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
185 if (r->req.io_canceled) {
186 scsi_req_cancel_complete(&r->req);
187 return true;
190 if (ret < 0 || (r->status && *r->status)) {
191 return scsi_handle_rw_error(r, -ret, acct_failed);
194 return false;
197 static void scsi_aio_complete(void *opaque, int ret)
199 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
200 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
202 assert(r->req.aiocb != NULL);
203 r->req.aiocb = NULL;
204 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
205 if (scsi_disk_req_check_error(r, ret, true)) {
206 goto done;
209 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
210 scsi_req_complete(&r->req, GOOD);
212 done:
213 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
214 scsi_req_unref(&r->req);
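/* Does the CDB request Force Unit Access?  READ/WRITE(10/12/16) carry an FUA
 * bit in byte 1, VERIFY and WRITE AND VERIFY always imply it, and the 6-byte
 * variants have no such bit. */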
217 static bool scsi_is_cmd_fua(SCSICommand *cmd)
219 switch (cmd->buf[0]) {
220 case READ_10:
221 case READ_12:
222 case READ_16:
223 case WRITE_10:
224 case WRITE_12:
225 case WRITE_16:
226 return (cmd->buf[1] & 8) != 0;
228 case VERIFY_10:
229 case VERIFY_12:
230 case VERIFY_16:
231 case WRITE_VERIFY_10:
232 case WRITE_VERIFY_12:
233 case WRITE_VERIFY_16:
234 return true;
236 case READ_6:
237 case WRITE_6:
238 default:
239 return false;
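/* Finish a write: if FUA has to be emulated, flush the backend and complete
 * the request from scsi_aio_complete, otherwise report GOOD right away. */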
243 static void scsi_write_do_fua(SCSIDiskReq *r)
245 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
247 assert(r->req.aiocb == NULL);
248 assert(!r->req.io_canceled);
250 if (r->need_fua_emulation) {
251 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
252 BLOCK_ACCT_FLUSH);
253 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
254 return;
257 scsi_req_complete(&r->req, GOOD);
258 scsi_req_unref(&r->req);
261 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
263 assert(r->req.aiocb == NULL);
264 if (scsi_disk_req_check_error(r, ret, false)) {
265 goto done;
268 r->sector += r->sector_count;
269 r->sector_count = 0;
270 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
271 scsi_write_do_fua(r);
272 return;
273 } else {
274 scsi_req_complete(&r->req, GOOD);
277 done:
278 scsi_req_unref(&r->req);
281 static void scsi_dma_complete(void *opaque, int ret)
283 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
284 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
286 assert(r->req.aiocb != NULL);
287 r->req.aiocb = NULL;
289 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
290 if (ret < 0) {
291 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
292 } else {
293 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
295 scsi_dma_complete_noio(r, ret);
296 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
299 static void scsi_read_complete(void * opaque, int ret)
301 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
302 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
303 int n;
305 assert(r->req.aiocb != NULL);
306 r->req.aiocb = NULL;
307 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
308 if (scsi_disk_req_check_error(r, ret, true)) {
309 goto done;
312 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
313 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
315 n = r->qiov.size / 512;
316 r->sector += n;
317 r->sector_count -= n;
318 scsi_req_data(&r->req, r->qiov.size);
320 done:
321 scsi_req_unref(&r->req);
322 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
325 /* Actually issue a read to the block device. */
326 static void scsi_do_read(SCSIDiskReq *r, int ret)
328 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
329 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
331 assert (r->req.aiocb == NULL);
332 if (scsi_disk_req_check_error(r, ret, false)) {
333 goto done;
336 /* The request is used as the AIO opaque value, so add a ref. */
337 scsi_req_ref(&r->req);
339 if (r->req.sg) {
340 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
341 r->req.resid -= r->req.sg->size;
342 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
343 r->req.sg, r->sector << BDRV_SECTOR_BITS,
344 BDRV_SECTOR_SIZE,
345 sdc->dma_readv, r, scsi_dma_complete, r,
346 DMA_DIRECTION_FROM_DEVICE);
347 } else {
348 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
349 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
350 r->qiov.size, BLOCK_ACCT_READ);
351 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
352 scsi_read_complete, r, r);
355 done:
356 scsi_req_unref(&r->req);
359 static void scsi_do_read_cb(void *opaque, int ret)
361 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
362 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
364 assert (r->req.aiocb != NULL);
365 r->req.aiocb = NULL;
367 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
368 if (ret < 0) {
369 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
370 } else {
371 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
373 scsi_do_read(opaque, ret);
374 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
377 /* Read more data from scsi device into buffer. */
378 static void scsi_read_data(SCSIRequest *req)
380 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
381 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
382 bool first;
384 trace_scsi_disk_read_data_count(r->sector_count);
385 if (r->sector_count == 0) {
386 /* This also clears the sense buffer for REQUEST SENSE. */
387 scsi_req_complete(&r->req, GOOD);
388 return;
391 /* No data transfer may already be in progress */
392 assert(r->req.aiocb == NULL);
394 /* The request is used as the AIO opaque value, so add a ref. */
395 scsi_req_ref(&r->req);
396 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
397 trace_scsi_disk_read_data_invalid();
398 scsi_read_complete(r, -EINVAL);
399 return;
402 if (!blk_is_available(req->dev->conf.blk)) {
403 scsi_read_complete(r, -ENOMEDIUM);
404 return;
407 first = !r->started;
408 r->started = true;
409 if (first && r->need_fua_emulation) {
410 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
411 BLOCK_ACCT_FLUSH);
412 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
413 } else {
414 scsi_do_read(r, 0);
419 * scsi_handle_rw_error has two return values. False means that the error
420 * must be ignored, true means that the error has been processed and the
421 * caller should not do anything else for this request. Note that
422 * scsi_handle_rw_error always manages its reference counts, independent
423 * of the return value.
425 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
427 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
428 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
429 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
430 is_read, error);
432 if (action == BLOCK_ERROR_ACTION_REPORT) {
433 if (acct_failed) {
434 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
436 switch (error) {
437 case 0:
438 /* A passthrough command has run and has produced sense data; check
439 * whether the error has to be handled by the guest or should rather
440 * pause the host.
442 assert(r->status && *r->status);
443 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
444 if (error == ECANCELED || error == EAGAIN || error == ENOTCONN ||
445 error == 0) {
446 /* These errors are handled by guest. */
447 scsi_req_complete(&r->req, *r->status);
448 return true;
450 break;
451 case ENOMEDIUM:
452 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
453 break;
454 case ENOMEM:
455 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
456 break;
457 case EINVAL:
458 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
459 break;
460 case ENOSPC:
461 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
462 break;
463 default:
464 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
465 break;
469 blk_error_action(s->qdev.conf.blk, action, is_read, error);
470 if (action == BLOCK_ERROR_ACTION_IGNORE) {
471 scsi_req_complete(&r->req, 0);
472 return true;
475 if (action == BLOCK_ERROR_ACTION_STOP) {
476 scsi_req_retry(&r->req);
478 return true;
481 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
483 uint32_t n;
485 assert (r->req.aiocb == NULL);
486 if (scsi_disk_req_check_error(r, ret, false)) {
487 goto done;
490 n = r->qiov.size / 512;
491 r->sector += n;
492 r->sector_count -= n;
493 if (r->sector_count == 0) {
494 scsi_write_do_fua(r);
495 return;
496 } else {
497 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
498 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
499 scsi_req_data(&r->req, r->qiov.size);
502 done:
503 scsi_req_unref(&r->req);
506 static void scsi_write_complete(void * opaque, int ret)
508 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
509 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
511 assert (r->req.aiocb != NULL);
512 r->req.aiocb = NULL;
514 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
515 if (ret < 0) {
516 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
517 } else {
518 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
520 scsi_write_complete_noio(r, ret);
521 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
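/* Entry point for writes.  The first call (nothing buffered yet) only asks
 * the HBA for data; later calls submit the scatter/gather list or bounce
 * buffer to the backend.  VERIFY commands cycle through the data without
 * writing it. */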
524 static void scsi_write_data(SCSIRequest *req)
526 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
527 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
528 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
530 /* No data transfer may already be in progress */
531 assert(r->req.aiocb == NULL);
533 /* The request is used as the AIO opaque value, so add a ref. */
534 scsi_req_ref(&r->req);
535 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
536 trace_scsi_disk_write_data_invalid();
537 scsi_write_complete_noio(r, -EINVAL);
538 return;
541 if (!r->req.sg && !r->qiov.size) {
542 /* Called for the first time. Ask the driver to send us more data. */
543 r->started = true;
544 scsi_write_complete_noio(r, 0);
545 return;
547 if (!blk_is_available(req->dev->conf.blk)) {
548 scsi_write_complete_noio(r, -ENOMEDIUM);
549 return;
552 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
553 r->req.cmd.buf[0] == VERIFY_16) {
554 if (r->req.sg) {
555 scsi_dma_complete_noio(r, 0);
556 } else {
557 scsi_write_complete_noio(r, 0);
559 return;
562 if (r->req.sg) {
563 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
564 r->req.resid -= r->req.sg->size;
565 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
566 r->req.sg, r->sector << BDRV_SECTOR_BITS,
567 BDRV_SECTOR_SIZE,
568 sdc->dma_writev, r, scsi_dma_complete, r,
569 DMA_DIRECTION_TO_DEVICE);
570 } else {
571 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
572 r->qiov.size, BLOCK_ACCT_WRITE);
573 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
574 scsi_write_complete, r, r);
578 /* Return a pointer to the data buffer. */
579 static uint8_t *scsi_get_buf(SCSIRequest *req)
581 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
583 return (uint8_t *)r->iov.iov_base;
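/* Build the requested EVPD page (0x00, 0x80, 0x83, 0xb0, 0xb1 or 0xb2) into
 * outbuf and return its length, or -1 if the page is not supported. */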
586 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
588 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
589 uint8_t page_code = req->cmd.buf[2];
590 int start, buflen = 0;
592 outbuf[buflen++] = s->qdev.type & 0x1f;
593 outbuf[buflen++] = page_code;
594 outbuf[buflen++] = 0x00;
595 outbuf[buflen++] = 0x00;
596 start = buflen;
598 switch (page_code) {
599 case 0x00: /* Supported page codes, mandatory */
601 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
602 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
603 if (s->serial) {
604 outbuf[buflen++] = 0x80; /* unit serial number */
606 outbuf[buflen++] = 0x83; /* device identification */
607 if (s->qdev.type == TYPE_DISK) {
608 outbuf[buflen++] = 0xb0; /* block limits */
609 outbuf[buflen++] = 0xb1; /* block device characteristics */
610 outbuf[buflen++] = 0xb2; /* thin provisioning */
612 break;
614 case 0x80: /* Device serial number, optional */
616 int l;
618 if (!s->serial) {
619 trace_scsi_disk_emulate_vpd_page_80_not_supported();
620 return -1;
623 l = strlen(s->serial);
624 if (l > 36) {
625 l = 36;
628 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
629 memcpy(outbuf + buflen, s->serial, l);
630 buflen += l;
631 break;
634 case 0x83: /* Device identification page, mandatory */
636 int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
638 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
640 if (id_len) {
641 outbuf[buflen++] = 0x2; /* ASCII */
642 outbuf[buflen++] = 0; /* not officially assigned */
643 outbuf[buflen++] = 0; /* reserved */
644 outbuf[buflen++] = id_len; /* length of data following */
645 memcpy(outbuf + buflen, s->device_id, id_len);
646 buflen += id_len;
649 if (s->qdev.wwn) {
650 outbuf[buflen++] = 0x1; /* Binary */
651 outbuf[buflen++] = 0x3; /* NAA */
652 outbuf[buflen++] = 0; /* reserved */
653 outbuf[buflen++] = 8;
654 stq_be_p(&outbuf[buflen], s->qdev.wwn);
655 buflen += 8;
658 if (s->qdev.port_wwn) {
659 outbuf[buflen++] = 0x61; /* SAS / Binary */
660 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
661 outbuf[buflen++] = 0; /* reserved */
662 outbuf[buflen++] = 8;
663 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
664 buflen += 8;
667 if (s->port_index) {
668 outbuf[buflen++] = 0x61; /* SAS / Binary */
670 /* PIV/Target port/relative target port */
671 outbuf[buflen++] = 0x94;
673 outbuf[buflen++] = 0; /* reserved */
674 outbuf[buflen++] = 4;
675 stw_be_p(&outbuf[buflen + 2], s->port_index);
676 buflen += 4;
678 break;
680 case 0xb0: /* block limits */
682 SCSIBlockLimits bl = {};
684 if (s->qdev.type == TYPE_ROM) {
685 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
686 return -1;
688 bl.wsnz = 1;
689 bl.unmap_sectors =
690 s->qdev.conf.discard_granularity / s->qdev.blocksize;
691 bl.min_io_size =
692 s->qdev.conf.min_io_size / s->qdev.blocksize;
693 bl.opt_io_size =
694 s->qdev.conf.opt_io_size / s->qdev.blocksize;
695 bl.max_unmap_sectors =
696 s->max_unmap_size / s->qdev.blocksize;
697 bl.max_io_sectors =
698 s->max_io_size / s->qdev.blocksize;
699 /* 255 descriptors fit in 4 KiB with an 8-byte header */
700 bl.max_unmap_descr = 255;
702 if (s->qdev.type == TYPE_DISK) {
703 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
704 int max_io_sectors_blk =
705 max_transfer_blk / s->qdev.blocksize;
707 bl.max_io_sectors =
708 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
710 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
711 break;
713 case 0xb1: /* block device characteristics */
715 buflen = 0x40;
716 outbuf[4] = (s->rotation_rate >> 8) & 0xff;
717 outbuf[5] = s->rotation_rate & 0xff;
718 outbuf[6] = 0; /* PRODUCT TYPE */
719 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
720 outbuf[8] = 0; /* VBULS */
721 break;
723 case 0xb2: /* thin provisioning */
725 buflen = 8;
726 outbuf[4] = 0;
727 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
728 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
729 outbuf[7] = 0;
730 break;
732 default:
733 return -1;
735 /* done with EVPD */
736 assert(buflen - start <= 255);
737 outbuf[start - 1] = buflen - start;
738 return buflen;
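/* INQUIRY: hand EVPD requests to scsi_disk_emulate_vpd_page, otherwise fill
 * in the standard inquiry data (padded vendor/product/version strings, the
 * claimed SCSI level, TCQ support) and return its length, or -1 on an
 * invalid CDB. */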
741 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
743 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
744 int buflen = 0;
746 if (req->cmd.buf[1] & 0x1) {
747 /* Vital product data */
748 return scsi_disk_emulate_vpd_page(req, outbuf);
751 /* Standard INQUIRY data */
752 if (req->cmd.buf[2] != 0) {
753 return -1;
756 /* PAGE CODE == 0 */
757 buflen = req->cmd.xfer;
758 if (buflen > SCSI_MAX_INQUIRY_LEN) {
759 buflen = SCSI_MAX_INQUIRY_LEN;
762 outbuf[0] = s->qdev.type & 0x1f;
763 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
765 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
766 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
768 memset(&outbuf[32], 0, 4);
769 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
771 * We claim conformance to SPC-3, which is required for guests
772 * to ask for modern features like READ CAPACITY(16) or the
773 * block characteristics VPD page by default. Not all of SPC-3
774 * is actually implemented, but we're good enough.
776 outbuf[2] = s->qdev.default_scsi_version;
777 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
779 if (buflen > 36) {
780 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
781 } else {
782 /* If the allocation length of the CDB is too small,
783 the additional length is not adjusted */
784 outbuf[4] = 36 - 5;
787 /* Sync data transfer and TCQ. */
788 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
789 return buflen;
792 static inline bool media_is_dvd(SCSIDiskState *s)
794 uint64_t nb_sectors;
795 if (s->qdev.type != TYPE_ROM) {
796 return false;
798 if (!blk_is_available(s->qdev.conf.blk)) {
799 return false;
801 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
802 return nb_sectors > CD_MAX_SECTORS;
805 static inline bool media_is_cd(SCSIDiskState *s)
807 uint64_t nb_sectors;
808 if (s->qdev.type != TYPE_ROM) {
809 return false;
811 if (!blk_is_available(s->qdev.conf.blk)) {
812 return false;
814 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
815 return nb_sectors <= CD_MAX_SECTORS;
818 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
819 uint8_t *outbuf)
821 uint8_t type = r->req.cmd.buf[1] & 7;
823 if (s->qdev.type != TYPE_ROM) {
824 return -1;
827 /* Types 1/2 are only defined for Blu-Ray. */
828 if (type != 0) {
829 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
830 return -1;
833 memset(outbuf, 0, 34);
834 outbuf[1] = 32;
835 outbuf[2] = 0xe; /* last session complete, disc finalized */
836 outbuf[3] = 1; /* first track on disc */
837 outbuf[4] = 1; /* # of sessions */
838 outbuf[5] = 1; /* first track of last session */
839 outbuf[6] = 1; /* last track of last session */
840 outbuf[7] = 0x20; /* unrestricted use */
841 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
842 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
843 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
844 /* 24-31: disc bar code */
845 /* 32: disc application code */
846 /* 33: number of OPC tables */
848 return 34;
851 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
852 uint8_t *outbuf)
854 static const int rds_caps_size[5] = {
855 [0] = 2048 + 4,
856 [1] = 4 + 4,
857 [3] = 188 + 4,
858 [4] = 2048 + 4,
861 uint8_t media = r->req.cmd.buf[1];
862 uint8_t layer = r->req.cmd.buf[6];
863 uint8_t format = r->req.cmd.buf[7];
864 int size = -1;
866 if (s->qdev.type != TYPE_ROM) {
867 return -1;
869 if (media != 0) {
870 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
871 return -1;
874 if (format != 0xff) {
875 if (!blk_is_available(s->qdev.conf.blk)) {
876 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
877 return -1;
879 if (media_is_cd(s)) {
880 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
881 return -1;
883 if (format >= ARRAY_SIZE(rds_caps_size)) {
884 return -1;
886 size = rds_caps_size[format];
887 memset(outbuf, 0, size);
890 switch (format) {
891 case 0x00: {
892 /* Physical format information */
893 uint64_t nb_sectors;
894 if (layer != 0) {
895 goto fail;
897 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
899 outbuf[4] = 1; /* DVD-ROM, part version 1 */
900 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
901 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
902 outbuf[7] = 0; /* default densities */
904 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
905 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
906 break;
909 case 0x01: /* DVD copyright information, all zeros */
910 break;
912 case 0x03: /* BCA information - invalid field for no BCA info */
913 return -1;
915 case 0x04: /* DVD disc manufacturing information, all zeros */
916 break;
918 case 0xff: { /* List capabilities */
919 int i;
920 size = 4;
921 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
922 if (!rds_caps_size[i]) {
923 continue;
925 outbuf[size] = i;
926 outbuf[size + 1] = 0x40; /* Not writable, readable */
927 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
928 size += 4;
930 break;
933 default:
934 return -1;
937 /* Size of buffer, not including 2 byte size field */
938 stw_be_p(outbuf, size - 2);
939 return size;
941 fail:
942 return -1;
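/* Media class of GET EVENT STATUS NOTIFICATION: report the tray state and a
 * pending new-media or eject-request event, clearing it once reported. */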
945 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
947 uint8_t event_code, media_status;
949 media_status = 0;
950 if (s->tray_open) {
951 media_status = MS_TRAY_OPEN;
952 } else if (blk_is_inserted(s->qdev.conf.blk)) {
953 media_status = MS_MEDIA_PRESENT;
956 /* Event notification descriptor */
957 event_code = MEC_NO_CHANGE;
958 if (media_status != MS_TRAY_OPEN) {
959 if (s->media_event) {
960 event_code = MEC_NEW_MEDIA;
961 s->media_event = false;
962 } else if (s->eject_request) {
963 event_code = MEC_EJECT_REQUESTED;
964 s->eject_request = false;
968 outbuf[0] = event_code;
969 outbuf[1] = media_status;
971 /* These fields are reserved, just clear them. */
972 outbuf[2] = 0;
973 outbuf[3] = 0;
974 return 4;
977 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
978 uint8_t *outbuf)
980 int size;
981 uint8_t *buf = r->req.cmd.buf;
982 uint8_t notification_class_request = buf[4];
983 if (s->qdev.type != TYPE_ROM) {
984 return -1;
986 if ((buf[1] & 1) == 0) {
987 /* asynchronous */
988 return -1;
991 size = 4;
992 outbuf[0] = outbuf[1] = 0;
993 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
994 if (notification_class_request & (1 << GESN_MEDIA)) {
995 outbuf[2] = GESN_MEDIA;
996 size += scsi_event_status_media(s, &outbuf[size]);
997 } else {
998 outbuf[2] = 0x80;
1000 stw_be_p(outbuf, size - 4);
1001 return size;
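/* GET CONFIGURATION: report the current MMC profile (CD-ROM, DVD-ROM or none)
 * plus the profile list, core and removable medium feature descriptors. */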
1004 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1006 int current;
1008 if (s->qdev.type != TYPE_ROM) {
1009 return -1;
1012 if (media_is_dvd(s)) {
1013 current = MMC_PROFILE_DVD_ROM;
1014 } else if (media_is_cd(s)) {
1015 current = MMC_PROFILE_CD_ROM;
1016 } else {
1017 current = MMC_PROFILE_NONE;
1020 memset(outbuf, 0, 40);
1021 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1022 stw_be_p(&outbuf[6], current);
1023 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1024 outbuf[10] = 0x03; /* persistent, current */
1025 outbuf[11] = 8; /* two profiles */
1026 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1027 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1028 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1029 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1030 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1031 stw_be_p(&outbuf[20], 1);
1032 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1033 outbuf[23] = 8;
1034 stl_be_p(&outbuf[24], 1); /* SCSI */
1035 outbuf[28] = 1; /* DBE = 1, mandatory */
1036 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1037 stw_be_p(&outbuf[32], 3);
1038 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1039 outbuf[35] = 4;
1040 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1041 /* TODO: Random readable, CD read, DVD read, drive serial number,
1042 power management */
1043 return 40;
1046 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1048 if (s->qdev.type != TYPE_ROM) {
1049 return -1;
1051 memset(outbuf, 0, 8);
1052 outbuf[5] = 1; /* CD-ROM */
1053 return 8;
1056 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1057 int page_control)
1059 static const int mode_sense_valid[0x3f] = {
1060 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1061 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1062 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1063 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1064 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1065 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1068 uint8_t *p = *p_outbuf + 2;
1069 int length;
1071 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1072 return -1;
1076 * If Changeable Values are requested, a mask denoting those mode parameters
1077 * that are changeable shall be returned. As we currently don't support
1078 * parameter changes via MODE_SELECT all bits are returned set to zero.
1079 * The buffer was already memset to zero by the caller of this function.
1081 * The offsets here are off by two compared to the descriptions in the
1082 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1083 * but it is done so that offsets are consistent within our implementation
1084 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1085 * 2-byte and 4-byte headers.
1087 switch (page) {
1088 case MODE_PAGE_HD_GEOMETRY:
1089 length = 0x16;
1090 if (page_control == 1) { /* Changeable Values */
1091 break;
1093 /* if a geometry hint is available, use it */
1094 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1095 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1096 p[2] = s->qdev.conf.cyls & 0xff;
1097 p[3] = s->qdev.conf.heads & 0xff;
1098 /* Write precomp start cylinder, disabled */
1099 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1100 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1101 p[6] = s->qdev.conf.cyls & 0xff;
1102 /* Reduced current start cylinder, disabled */
1103 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1104 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1105 p[9] = s->qdev.conf.cyls & 0xff;
1106 /* Device step rate [ns], 200ns */
1107 p[10] = 0;
1108 p[11] = 200;
1109 /* Landing zone cylinder */
1110 p[12] = 0xff;
1111 p[13] = 0xff;
1112 p[14] = 0xff;
1113 /* Medium rotation rate [rpm], 5400 rpm */
1114 p[18] = (5400 >> 8) & 0xff;
1115 p[19] = 5400 & 0xff;
1116 break;
1118 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1119 length = 0x1e;
1120 if (page_control == 1) { /* Changeable Values */
1121 break;
1123 /* Transfer rate [kbit/s], 5Mbit/s */
1124 p[0] = 5000 >> 8;
1125 p[1] = 5000 & 0xff;
1126 /* if a geometry hint is available, use it */
1127 p[2] = s->qdev.conf.heads & 0xff;
1128 p[3] = s->qdev.conf.secs & 0xff;
1129 p[4] = s->qdev.blocksize >> 8;
1130 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1131 p[7] = s->qdev.conf.cyls & 0xff;
1132 /* Write precomp start cylinder, disabled */
1133 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1134 p[9] = s->qdev.conf.cyls & 0xff;
1135 /* Reduced current start cylinder, disabled */
1136 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1137 p[11] = s->qdev.conf.cyls & 0xff;
1138 /* Device step rate [100us], 100us */
1139 p[12] = 0;
1140 p[13] = 1;
1141 /* Device step pulse width [us], 1us */
1142 p[14] = 1;
1143 /* Device head settle delay [100us], 100us */
1144 p[15] = 0;
1145 p[16] = 1;
1146 /* Motor on delay [0.1s], 0.1s */
1147 p[17] = 1;
1148 /* Motor off delay [0.1s], 0.1s */
1149 p[18] = 1;
1150 /* Medium rotation rate [rpm], 5400 rpm */
1151 p[26] = (5400 >> 8) & 0xff;
1152 p[27] = 5400 & 0xff;
1153 break;
1155 case MODE_PAGE_CACHING:
1156 length = 0x12;
1157 if (page_control == 1 || /* Changeable Values */
1158 blk_enable_write_cache(s->qdev.conf.blk)) {
1159 p[0] = 4; /* WCE */
1161 break;
1163 case MODE_PAGE_R_W_ERROR:
1164 length = 10;
1165 if (page_control == 1) { /* Changeable Values */
1166 break;
1168 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1169 if (s->qdev.type == TYPE_ROM) {
1170 p[1] = 0x20; /* Read Retry Count */
1172 break;
1174 case MODE_PAGE_AUDIO_CTL:
1175 length = 14;
1176 break;
1178 case MODE_PAGE_CAPABILITIES:
1179 length = 0x14;
1180 if (page_control == 1) { /* Changeable Values */
1181 break;
1184 p[0] = 0x3b; /* CD-R & CD-RW read */
1185 p[1] = 0; /* Writing not supported */
1186 p[2] = 0x7f; /* Audio, composite, digital out,
1187 mode 2 form 1&2, multi session */
1188 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1189 RW corrected, C2 errors, ISRC,
1190 UPC, Bar code */
1191 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1192 /* Locking supported, jumper present, eject, tray */
1193 p[5] = 0; /* no volume & mute control, no
1194 changer */
1195 p[6] = (50 * 176) >> 8; /* 50x read speed */
1196 p[7] = (50 * 176) & 0xff;
1197 p[8] = 2 >> 8; /* Two volume levels */
1198 p[9] = 2 & 0xff;
1199 p[10] = 2048 >> 8; /* 2M buffer */
1200 p[11] = 2048 & 0xff;
1201 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1202 p[13] = (16 * 176) & 0xff;
1203 p[16] = (16 * 176) >> 8; /* 16x write speed */
1204 p[17] = (16 * 176) & 0xff;
1205 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1206 p[19] = (16 * 176) & 0xff;
1207 break;
1209 default:
1210 return -1;
1213 assert(length < 256);
1214 (*p_outbuf)[0] = page;
1215 (*p_outbuf)[1] = length;
1216 *p_outbuf += length + 2;
1217 return length + 2;
1220 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1222 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1223 uint64_t nb_sectors;
1224 bool dbd;
1225 int page, buflen, ret, page_control;
1226 uint8_t *p;
1227 uint8_t dev_specific_param;
1229 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1230 page = r->req.cmd.buf[2] & 0x3f;
1231 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1233 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1234 10, page, r->req.cmd.xfer, page_control);
1235 memset(outbuf, 0, r->req.cmd.xfer);
1236 p = outbuf;
1238 if (s->qdev.type == TYPE_DISK) {
1239 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1240 if (blk_is_read_only(s->qdev.conf.blk)) {
1241 dev_specific_param |= 0x80; /* Readonly. */
1243 } else {
1244 /* MMC prescribes that CD/DVD drives have no block descriptors,
1245 * and defines no device-specific parameter. */
1246 dev_specific_param = 0x00;
1247 dbd = true;
1250 if (r->req.cmd.buf[0] == MODE_SENSE) {
1251 p[1] = 0; /* Default media type. */
1252 p[2] = dev_specific_param;
1253 p[3] = 0; /* Block descriptor length. */
1254 p += 4;
1255 } else { /* MODE_SENSE_10 */
1256 p[2] = 0; /* Default media type. */
1257 p[3] = dev_specific_param;
1258 p[6] = p[7] = 0; /* Block descriptor length. */
1259 p += 8;
1262 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1263 if (!dbd && nb_sectors) {
1264 if (r->req.cmd.buf[0] == MODE_SENSE) {
1265 outbuf[3] = 8; /* Block descriptor length */
1266 } else { /* MODE_SENSE_10 */
1267 outbuf[7] = 8; /* Block descriptor length */
1269 nb_sectors /= (s->qdev.blocksize / 512);
1270 if (nb_sectors > 0xffffff) {
1271 nb_sectors = 0;
1273 p[0] = 0; /* media density code */
1274 p[1] = (nb_sectors >> 16) & 0xff;
1275 p[2] = (nb_sectors >> 8) & 0xff;
1276 p[3] = nb_sectors & 0xff;
1277 p[4] = 0; /* reserved */
1278 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1279 p[6] = s->qdev.blocksize >> 8;
1280 p[7] = 0;
1281 p += 8;
1284 if (page_control == 3) {
1285 /* Saved Values */
1286 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1287 return -1;
1290 if (page == 0x3f) {
1291 for (page = 0; page <= 0x3e; page++) {
1292 mode_sense_page(s, page, &p, page_control);
1294 } else {
1295 ret = mode_sense_page(s, page, &p, page_control);
1296 if (ret == -1) {
1297 return -1;
1301 buflen = p - outbuf;
1303 * The mode data length field specifies the length in bytes of the
1304 * following data that is available to be transferred. The mode data
1305 * length does not include itself.
1307 if (r->req.cmd.buf[0] == MODE_SENSE) {
1308 outbuf[0] = buflen - 1;
1309 } else { /* MODE_SENSE_10 */
1310 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1311 outbuf[1] = (buflen - 2) & 0xff;
1313 return buflen;
1316 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1318 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1319 int start_track, format, msf, toclen;
1320 uint64_t nb_sectors;
1322 msf = req->cmd.buf[1] & 2;
1323 format = req->cmd.buf[2] & 0xf;
1324 start_track = req->cmd.buf[6];
1325 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1326 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1327 nb_sectors /= s->qdev.blocksize / 512;
1328 switch (format) {
1329 case 0:
1330 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1331 break;
1332 case 1:
1333 /* multi session : only a single session defined */
1334 toclen = 12;
1335 memset(outbuf, 0, 12);
1336 outbuf[1] = 0x0a;
1337 outbuf[2] = 0x01;
1338 outbuf[3] = 0x01;
1339 break;
1340 case 2:
1341 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1342 break;
1343 default:
1344 return -1;
1346 return toclen;
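/* START STOP UNIT: only act when the power condition field is zero, refuse to
 * eject while the tray is locked, and load/eject the medium as requested. */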
1349 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1351 SCSIRequest *req = &r->req;
1352 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1353 bool start = req->cmd.buf[4] & 1;
1354 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1355 int pwrcnd = req->cmd.buf[4] & 0xf0;
1357 if (pwrcnd) {
1358 /* eject/load only happens for power condition == 0 */
1359 return 0;
1362 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1363 if (!start && !s->tray_open && s->tray_locked) {
1364 scsi_check_condition(r,
1365 blk_is_inserted(s->qdev.conf.blk)
1366 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1367 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1368 return -1;
1371 if (s->tray_open != !start) {
1372 blk_eject(s->qdev.conf.blk, !start);
1373 s->tray_open = !start;
1376 return 0;
1379 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1381 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1382 int buflen = r->iov.iov_len;
1384 if (buflen) {
1385 trace_scsi_disk_emulate_read_data(buflen);
1386 r->iov.iov_len = 0;
1387 r->started = true;
1388 scsi_req_data(&r->req, buflen);
1389 return;
1392 /* This also clears the sense buffer for REQUEST SENSE. */
1393 scsi_req_complete(&r->req, GOOD);
1396 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1397 uint8_t *inbuf, int inlen)
1399 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1400 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1401 uint8_t *p;
1402 int len, expected_len, changeable_len, i;
1404 /* The input buffer does not include the page header, so it is
1405 * off by 2 bytes.
1407 expected_len = inlen + 2;
1408 if (expected_len > SCSI_MAX_MODE_LEN) {
1409 return -1;
1412 p = mode_current;
1413 memset(mode_current, 0, inlen + 2);
1414 len = mode_sense_page(s, page, &p, 0);
1415 if (len < 0 || len != expected_len) {
1416 return -1;
1419 p = mode_changeable;
1420 memset(mode_changeable, 0, inlen + 2);
1421 changeable_len = mode_sense_page(s, page, &p, 1);
1422 assert(changeable_len == len);
1424 /* Check that unchangeable bits are the same as what MODE SENSE
1425 * would return.
1427 for (i = 2; i < len; i++) {
1428 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1429 return -1;
1432 return 0;
1435 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1437 switch (page) {
1438 case MODE_PAGE_CACHING:
1439 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1440 break;
1442 default:
1443 break;
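/* Walk the MODE SELECT parameter data, accepting both the page_0 and subpage
 * header formats.  With change=false each page is only checked against what
 * MODE SENSE reports as changeable; with change=true the values are applied. */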
1447 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1449 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1451 while (len > 0) {
1452 int page, subpage, page_len;
1454 /* Parse both possible formats for the mode page headers. */
1455 page = p[0] & 0x3f;
1456 if (p[0] & 0x40) {
1457 if (len < 4) {
1458 goto invalid_param_len;
1460 subpage = p[1];
1461 page_len = lduw_be_p(&p[2]);
1462 p += 4;
1463 len -= 4;
1464 } else {
1465 if (len < 2) {
1466 goto invalid_param_len;
1468 subpage = 0;
1469 page_len = p[1];
1470 p += 2;
1471 len -= 2;
1474 if (subpage) {
1475 goto invalid_param;
1477 if (page_len > len) {
1478 goto invalid_param_len;
1481 if (!change) {
1482 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1483 goto invalid_param;
1485 } else {
1486 scsi_disk_apply_mode_select(s, page, p);
1489 p += page_len;
1490 len -= page_len;
1492 return 0;
1494 invalid_param:
1495 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1496 return -1;
1498 invalid_param_len:
1499 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1500 return -1;
1503 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1505 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1506 uint8_t *p = inbuf;
1507 int cmd = r->req.cmd.buf[0];
1508 int len = r->req.cmd.xfer;
1509 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1510 int bd_len;
1511 int pass;
1513 /* We only support PF=1, SP=0. */
1514 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1515 goto invalid_field;
1518 if (len < hdr_len) {
1519 goto invalid_param_len;
1522 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1523 len -= hdr_len;
1524 p += hdr_len;
1525 if (len < bd_len) {
1526 goto invalid_param_len;
1528 if (bd_len != 0 && bd_len != 8) {
1529 goto invalid_param;
1532 len -= bd_len;
1533 p += bd_len;
1535 /* Ensure no change is made if there is an error! */
1536 for (pass = 0; pass < 2; pass++) {
1537 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1538 assert(pass == 0);
1539 return;
1542 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1543 /* The request is used as the AIO opaque value, so add a ref. */
1544 scsi_req_ref(&r->req);
1545 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1546 BLOCK_ACCT_FLUSH);
1547 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1548 return;
1551 scsi_req_complete(&r->req, GOOD);
1552 return;
1554 invalid_param:
1555 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1556 return;
1558 invalid_param_len:
1559 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1560 return;
1562 invalid_field:
1563 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1566 static inline bool check_lba_range(SCSIDiskState *s,
1567 uint64_t sector_num, uint32_t nb_sectors)
1570 * The first line tests that no overflow happens when computing the last
1571 * sector. The second line tests that the last accessed sector is in
1572 * range.
1574 * Careful, the computations should not underflow for nb_sectors == 0,
1575 * and a 0-block read to the first LBA beyond the end of device is
1576 * valid.
1578 return (sector_num <= sector_num + nb_sectors &&
1579 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1582 typedef struct UnmapCBData {
1583 SCSIDiskReq *r;
1584 uint8_t *inbuf;
1585 int count;
1586 } UnmapCBData;
1588 static void scsi_unmap_complete(void *opaque, int ret);
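/* Process one UNMAP descriptor per invocation: validate the LBA range, submit
 * the discard, and let scsi_unmap_complete re-enter here until data->count
 * reaches zero. */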
1590 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1592 SCSIDiskReq *r = data->r;
1593 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1594 uint64_t sector_num;
1595 uint32_t nb_sectors;
1597 assert(r->req.aiocb == NULL);
1598 if (scsi_disk_req_check_error(r, ret, false)) {
1599 goto done;
1602 if (data->count > 0) {
1603 sector_num = ldq_be_p(&data->inbuf[0]);
1604 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1605 if (!check_lba_range(s, sector_num, nb_sectors)) {
1606 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1607 goto done;
1610 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1611 sector_num * s->qdev.blocksize,
1612 nb_sectors * s->qdev.blocksize,
1613 scsi_unmap_complete, data);
1614 data->count--;
1615 data->inbuf += 16;
1616 return;
1619 scsi_req_complete(&r->req, GOOD);
1621 done:
1622 scsi_req_unref(&r->req);
1623 g_free(data);
1626 static void scsi_unmap_complete(void *opaque, int ret)
1628 UnmapCBData *data = opaque;
1629 SCSIDiskReq *r = data->r;
1630 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1632 assert(r->req.aiocb != NULL);
1633 r->req.aiocb = NULL;
1635 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1636 scsi_unmap_complete_noio(data, ret);
1637 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
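/* Parse the UNMAP parameter list: a header carrying the data and block
 * descriptor lengths, followed by 16-byte descriptors that each hold an
 * 8-byte LBA and a 4-byte block count. */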
1640 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1642 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1643 uint8_t *p = inbuf;
1644 int len = r->req.cmd.xfer;
1645 UnmapCBData *data;
1647 /* Reject ANCHOR=1. */
1648 if (r->req.cmd.buf[1] & 0x1) {
1649 goto invalid_field;
1652 if (len < 8) {
1653 goto invalid_param_len;
1655 if (len < lduw_be_p(&p[0]) + 2) {
1656 goto invalid_param_len;
1658 if (len < lduw_be_p(&p[2]) + 8) {
1659 goto invalid_param_len;
1661 if (lduw_be_p(&p[2]) & 15) {
1662 goto invalid_param_len;
1665 if (blk_is_read_only(s->qdev.conf.blk)) {
1666 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1667 return;
1670 data = g_new0(UnmapCBData, 1);
1671 data->r = r;
1672 data->inbuf = &p[8];
1673 data->count = lduw_be_p(&p[2]) >> 4;
1675 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1676 scsi_req_ref(&r->req);
1677 scsi_unmap_complete_noio(data, 0);
1678 return;
1680 invalid_param_len:
1681 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1682 return;
1684 invalid_field:
1685 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1688 typedef struct WriteSameCBData {
1689 SCSIDiskReq *r;
1690 int64_t sector;
1691 int nb_sectors;
1692 QEMUIOVector qiov;
1693 struct iovec iov;
1694 } WriteSameCBData;
1696 static void scsi_write_same_complete(void *opaque, int ret)
1698 WriteSameCBData *data = opaque;
1699 SCSIDiskReq *r = data->r;
1700 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1702 assert(r->req.aiocb != NULL);
1703 r->req.aiocb = NULL;
1704 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1705 if (scsi_disk_req_check_error(r, ret, true)) {
1706 goto done;
1709 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1711 data->nb_sectors -= data->iov.iov_len / 512;
1712 data->sector += data->iov.iov_len / 512;
1713 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1714 if (data->iov.iov_len) {
1715 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1716 data->iov.iov_len, BLOCK_ACCT_WRITE);
1717 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1718 * where final qiov may need smaller size */
1719 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1720 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1721 data->sector << BDRV_SECTOR_BITS,
1722 &data->qiov, 0,
1723 scsi_write_same_complete, data);
1724 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1725 return;
1728 scsi_req_complete(&r->req, GOOD);
1730 done:
1731 scsi_req_unref(&r->req);
1732 qemu_vfree(data->iov.iov_base);
1733 g_free(data);
1734 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
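/* WRITE SAME: if the payload is all zeroes (or CDB byte 1 bit 0 is set),
 * issue a single blk_aio_pwrite_zeroes, mapping the UNMAP bit to
 * BDRV_REQ_MAY_UNMAP; otherwise replicate the one-block payload into a bounce
 * buffer of at most SCSI_WRITE_SAME_MAX bytes and write it out in chunks. */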
1737 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1739 SCSIRequest *req = &r->req;
1740 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1741 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1742 WriteSameCBData *data;
1743 uint8_t *buf;
1744 int i;
1746 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1747 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1748 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1749 return;
1752 if (blk_is_read_only(s->qdev.conf.blk)) {
1753 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1754 return;
1756 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1757 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1758 return;
1761 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1762 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1764 /* The request is used as the AIO opaque value, so add a ref. */
1765 scsi_req_ref(&r->req);
1766 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1767 nb_sectors * s->qdev.blocksize,
1768 BLOCK_ACCT_WRITE);
1769 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1770 r->req.cmd.lba * s->qdev.blocksize,
1771 nb_sectors * s->qdev.blocksize,
1772 flags, scsi_aio_complete, r);
1773 return;
1776 data = g_new0(WriteSameCBData, 1);
1777 data->r = r;
1778 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1779 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1780 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1781 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1782 data->iov.iov_len);
1783 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1785 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1786 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1789 scsi_req_ref(&r->req);
1790 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1791 data->iov.iov_len, BLOCK_ACCT_WRITE);
1792 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1793 data->sector << BDRV_SECTOR_BITS,
1794 &data->qiov, 0,
1795 scsi_write_same_complete, data);
1798 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1800 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1802 if (r->iov.iov_len) {
1803 int buflen = r->iov.iov_len;
1804 trace_scsi_disk_emulate_write_data(buflen);
1805 r->iov.iov_len = 0;
1806 scsi_req_data(&r->req, buflen);
1807 return;
1810 switch (req->cmd.buf[0]) {
1811 case MODE_SELECT:
1812 case MODE_SELECT_10:
1813 /* This also clears the sense buffer for REQUEST SENSE. */
1814 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1815 break;
1817 case UNMAP:
1818 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1819 break;
1821 case VERIFY_10:
1822 case VERIFY_12:
1823 case VERIFY_16:
1824 if (r->req.status == -1) {
1825 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1827 break;
1829 case WRITE_SAME_10:
1830 case WRITE_SAME_16:
1831 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1832 break;
1834 default:
1835 abort();
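/* Dispatcher for the emulated commands: build the response in a bounce buffer
 * and return the expected transfer length (negative for data sent to the
 * device, zero when the request has already been completed). */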
1839 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1841 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1842 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1843 uint64_t nb_sectors;
1844 uint8_t *outbuf;
1845 int buflen;
1847 switch (req->cmd.buf[0]) {
1848 case INQUIRY:
1849 case MODE_SENSE:
1850 case MODE_SENSE_10:
1851 case RESERVE:
1852 case RESERVE_10:
1853 case RELEASE:
1854 case RELEASE_10:
1855 case START_STOP:
1856 case ALLOW_MEDIUM_REMOVAL:
1857 case GET_CONFIGURATION:
1858 case GET_EVENT_STATUS_NOTIFICATION:
1859 case MECHANISM_STATUS:
1860 case REQUEST_SENSE:
1861 break;
1863 default:
1864 if (!blk_is_available(s->qdev.conf.blk)) {
1865 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1866 return 0;
1868 break;
1872 * FIXME: we shouldn't return anything bigger than 4k, but the code
1873 * requires the buffer to be as big as req->cmd.xfer in several
1874 * places. So, do not allow CDBs with a very large ALLOCATION
1875 * LENGTH. The real fix would be to modify scsi_read_data and
1876 * dma_buf_read, so that they return data beyond the buflen
1877 * as all zeros.
1879 if (req->cmd.xfer > 65536) {
1880 goto illegal_request;
1882 r->buflen = MAX(4096, req->cmd.xfer);
1884 if (!r->iov.iov_base) {
1885 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1888 buflen = req->cmd.xfer;
1889 outbuf = r->iov.iov_base;
1890 memset(outbuf, 0, r->buflen);
1891 switch (req->cmd.buf[0]) {
1892 case TEST_UNIT_READY:
1893 assert(blk_is_available(s->qdev.conf.blk));
1894 break;
1895 case INQUIRY:
1896 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1897 if (buflen < 0) {
1898 goto illegal_request;
1900 break;
1901 case MODE_SENSE:
1902 case MODE_SENSE_10:
1903 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1904 if (buflen < 0) {
1905 goto illegal_request;
1907 break;
1908 case READ_TOC:
1909 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1910 if (buflen < 0) {
1911 goto illegal_request;
1913 break;
1914 case RESERVE:
1915 if (req->cmd.buf[1] & 1) {
1916 goto illegal_request;
1918 break;
1919 case RESERVE_10:
1920 if (req->cmd.buf[1] & 3) {
1921 goto illegal_request;
1923 break;
1924 case RELEASE:
1925 if (req->cmd.buf[1] & 1) {
1926 goto illegal_request;
1928 break;
1929 case RELEASE_10:
1930 if (req->cmd.buf[1] & 3) {
1931 goto illegal_request;
1933 break;
1934 case START_STOP:
1935 if (scsi_disk_emulate_start_stop(r) < 0) {
1936 return 0;
1938 break;
1939 case ALLOW_MEDIUM_REMOVAL:
1940 s->tray_locked = req->cmd.buf[4] & 1;
1941 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1942 break;
1943 case READ_CAPACITY_10:
1944 /* The normal LEN field for this command is zero. */
1945 memset(outbuf, 0, 8);
1946 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1947 if (!nb_sectors) {
1948 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1949 return 0;
1951 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1952 goto illegal_request;
1954 nb_sectors /= s->qdev.blocksize / 512;
1955 /* Returned value is the address of the last sector. */
1956 nb_sectors--;
1957 /* Remember the new size for read/write sanity checking. */
1958 s->qdev.max_lba = nb_sectors;
1959 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1960 if (nb_sectors > UINT32_MAX) {
1961 nb_sectors = UINT32_MAX;
1963 outbuf[0] = (nb_sectors >> 24) & 0xff;
1964 outbuf[1] = (nb_sectors >> 16) & 0xff;
1965 outbuf[2] = (nb_sectors >> 8) & 0xff;
1966 outbuf[3] = nb_sectors & 0xff;
1967 outbuf[4] = 0;
1968 outbuf[5] = 0;
1969 outbuf[6] = s->qdev.blocksize >> 8;
1970 outbuf[7] = 0;
1971 break;
1972 case REQUEST_SENSE:
1973 /* Just return "NO SENSE". */
1974 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
1975 (req->cmd.buf[1] & 1) == 0);
1976 if (buflen < 0) {
1977 goto illegal_request;
1979 break;
1980 case MECHANISM_STATUS:
1981 buflen = scsi_emulate_mechanism_status(s, outbuf);
1982 if (buflen < 0) {
1983 goto illegal_request;
1985 break;
1986 case GET_CONFIGURATION:
1987 buflen = scsi_get_configuration(s, outbuf);
1988 if (buflen < 0) {
1989 goto illegal_request;
1991 break;
1992 case GET_EVENT_STATUS_NOTIFICATION:
1993 buflen = scsi_get_event_status_notification(s, r, outbuf);
1994 if (buflen < 0) {
1995 goto illegal_request;
1997 break;
1998 case READ_DISC_INFORMATION:
1999 buflen = scsi_read_disc_information(s, r, outbuf);
2000 if (buflen < 0) {
2001 goto illegal_request;
2003 break;
2004 case READ_DVD_STRUCTURE:
2005 buflen = scsi_read_dvd_structure(s, r, outbuf);
2006 if (buflen < 0) {
2007 goto illegal_request;
2009 break;
2010 case SERVICE_ACTION_IN_16:
2011 /* Service Action In subcommands. */
2012 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2013 trace_scsi_disk_emulate_command_SAI_16();
2014 memset(outbuf, 0, req->cmd.xfer);
2015 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2016 if (!nb_sectors) {
2017 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2018 return 0;
2020 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2021 goto illegal_request;
2023 nb_sectors /= s->qdev.blocksize / 512;
2024 /* Returned value is the address of the last sector. */
2025 nb_sectors--;
2026 /* Remember the new size for read/write sanity checking. */
2027 s->qdev.max_lba = nb_sectors;
2028 outbuf[0] = (nb_sectors >> 56) & 0xff;
2029 outbuf[1] = (nb_sectors >> 48) & 0xff;
2030 outbuf[2] = (nb_sectors >> 40) & 0xff;
2031 outbuf[3] = (nb_sectors >> 32) & 0xff;
2032 outbuf[4] = (nb_sectors >> 24) & 0xff;
2033 outbuf[5] = (nb_sectors >> 16) & 0xff;
2034 outbuf[6] = (nb_sectors >> 8) & 0xff;
2035 outbuf[7] = nb_sectors & 0xff;
2036 outbuf[8] = 0;
2037 outbuf[9] = 0;
2038 outbuf[10] = s->qdev.blocksize >> 8;
2039 outbuf[11] = 0;
2040 outbuf[12] = 0;
2041 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2043 /* set TPE bit if the format supports discard */
2044 if (s->qdev.conf.discard_granularity) {
2045 outbuf[14] = 0x80;
2048 /* Protection, exponent and lowest lba field left blank. */
2049 break;
2051 trace_scsi_disk_emulate_command_SAI_unsupported();
2052 goto illegal_request;
2053 case SYNCHRONIZE_CACHE:
2054 /* The request is used as the AIO opaque value, so add a ref. */
2055 scsi_req_ref(&r->req);
2056 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2057 BLOCK_ACCT_FLUSH);
2058 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2059 return 0;
2060 case SEEK_10:
2061 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2062 if (r->req.cmd.lba > s->qdev.max_lba) {
2063 goto illegal_lba;
2065 break;
2066 case MODE_SELECT:
2067 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2068 break;
2069 case MODE_SELECT_10:
2070 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2071 break;
2072 case UNMAP:
2073 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2074 break;
2075 case VERIFY_10:
2076 case VERIFY_12:
2077 case VERIFY_16:
2078 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2079 if (req->cmd.buf[1] & 6) {
2080 goto illegal_request;
2082 break;
2083 case WRITE_SAME_10:
2084 case WRITE_SAME_16:
2085 trace_scsi_disk_emulate_command_WRITE_SAME(
2086 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2087 break;
2088 default:
2089 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2090 scsi_command_name(buf[0]));
2091 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2092 return 0;
2094 assert(!r->req.aiocb);
2095 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2096 if (r->iov.iov_len == 0) {
2097 scsi_req_complete(&r->req, GOOD);
2099 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2100 assert(r->iov.iov_len == req->cmd.xfer);
2101 return -r->iov.iov_len;
2102 } else {
2103 return r->iov.iov_len;
2106 illegal_request:
2107 if (r->req.status == -1) {
2108 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2110 return 0;
2112 illegal_lba:
2113 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2114 return 0;
2117 /* Execute a SCSI command.  Returns the length of the data expected by the
2118    command.  This will be positive for data transfers from the device
2119    (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2120    and zero if the command does not transfer any data. */
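/* For example, a READ(10) of 8 logical blocks on a 512-byte-block disk
   returns 4096, while the corresponding WRITE(10) returns -4096. */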
2122 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2124 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2125 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2126 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2127 uint32_t len;
2128 uint8_t command;
2130 command = buf[0];
2132 if (!blk_is_available(s->qdev.conf.blk)) {
2133 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2134 return 0;
2137 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2138 switch (command) {
2139 case READ_6:
2140 case READ_10:
2141 case READ_12:
2142 case READ_16:
2143 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2144 /* Protection information is not supported. For SCSI versions 2 and
2145 * older (as determined by snooping the guest's INQUIRY commands),
2146 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2147 */
2148 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2149 goto illegal_request;
2151 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2152 goto illegal_lba;
2154 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2155 r->sector_count = len * (s->qdev.blocksize / 512);
2156 break;
2157 case WRITE_6:
2158 case WRITE_10:
2159 case WRITE_12:
2160 case WRITE_16:
2161 case WRITE_VERIFY_10:
2162 case WRITE_VERIFY_12:
2163 case WRITE_VERIFY_16:
2164 if (blk_is_read_only(s->qdev.conf.blk)) {
2165 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2166 return 0;
2168 trace_scsi_disk_dma_command_WRITE(
2169 (command & 0xe) == 0xe ? "And Verify " : "",
2170 r->req.cmd.lba, len);
2171 /* fall through */
2172 case VERIFY_10:
2173 case VERIFY_12:
2174 case VERIFY_16:
2175 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2176 * As far as DMA is concerned, we can treat it the same as a write;
2177 * scsi_block_do_sgio will send VERIFY commands.
2178 */
2179 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2180 goto illegal_request;
2182 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2183 goto illegal_lba;
2185 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2186 r->sector_count = len * (s->qdev.blocksize / 512);
2187 break;
2188 default:
2189 abort();
2190 illegal_request:
2191 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2192 return 0;
2193 illegal_lba:
2194 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2195 return 0;
2197 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
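/* When FUA emulation is needed, the write path follows the data with an
 * explicit flush (see scsi_write_do_fua earlier in this file).
 */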
2198 if (r->sector_count == 0) {
2199 scsi_req_complete(&r->req, GOOD);
2201 assert(r->iov.iov_len == 0);
2202 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2203 return -r->sector_count * 512;
2204 } else {
2205 return r->sector_count * 512;
2209 static void scsi_disk_reset(DeviceState *dev)
2211 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2212 uint64_t nb_sectors;
2214 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2216 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2217 nb_sectors /= s->qdev.blocksize / 512;
2218 if (nb_sectors) {
2219 nb_sectors--;
2221 s->qdev.max_lba = nb_sectors;
2222 /* reset tray statuses */
2223 s->tray_locked = 0;
2224 s->tray_open = 0;
2226 s->qdev.scsi_version = s->qdev.default_scsi_version;
2229 static void scsi_disk_resize_cb(void *opaque)
2231 SCSIDiskState *s = opaque;
2233 /* SPC lists this sense code as available only for
2234 * direct-access devices.
2235 */
2236 if (s->qdev.type == TYPE_DISK) {
2237 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2241 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2243 SCSIDiskState *s = opaque;
2246 * When a CD gets changed, we have to report an ejected state and
2247 * then a loaded state to guests so that they detect tray
2248 * open/close and media change events. Guests that do not use
2249 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2250 * states rely on this behavior.
2252 * media_changed governs the state machine used for unit attention
2253 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2254 */
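/* In practice the guest therefore sees a NO MEDIUM unit attention first,
 * and MEDIUM CHANGED once that has been reported (see
 * scsi_disk_unit_attention_reported).
 */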
2255 s->media_changed = load;
2256 s->tray_open = !load;
2257 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2258 s->media_event = true;
2259 s->eject_request = false;
2262 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2264 SCSIDiskState *s = opaque;
2266 s->eject_request = true;
2267 if (force) {
2268 s->tray_locked = false;
2272 static bool scsi_cd_is_tray_open(void *opaque)
2274 return ((SCSIDiskState *)opaque)->tray_open;
2277 static bool scsi_cd_is_medium_locked(void *opaque)
2279 return ((SCSIDiskState *)opaque)->tray_locked;
2282 static const BlockDevOps scsi_disk_removable_block_ops = {
2283 .change_media_cb = scsi_cd_change_media_cb,
2284 .eject_request_cb = scsi_cd_eject_request_cb,
2285 .is_tray_open = scsi_cd_is_tray_open,
2286 .is_medium_locked = scsi_cd_is_medium_locked,
2288 .resize_cb = scsi_disk_resize_cb,
2291 static const BlockDevOps scsi_disk_block_ops = {
2292 .resize_cb = scsi_disk_resize_cb,
2295 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2297 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2298 if (s->media_changed) {
2299 s->media_changed = false;
2300 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2304 static void scsi_realize(SCSIDevice *dev, Error **errp)
2306 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2308 if (!s->qdev.conf.blk) {
2309 error_setg(errp, "drive property not set");
2310 return;
2313 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2314 !blk_is_inserted(s->qdev.conf.blk)) {
2315 error_setg(errp, "Device needs media, but drive is empty");
2316 return;
2319 blkconf_blocksizes(&s->qdev.conf);
2321 if (s->qdev.conf.logical_block_size >
2322 s->qdev.conf.physical_block_size) {
2323 error_setg(errp,
2324 "logical_block_size > physical_block_size not supported");
2325 return;
2328 if (dev->type == TYPE_DISK) {
2329 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
2330 return;
2333 if (!blkconf_apply_backend_options(&dev->conf,
2334 blk_is_read_only(s->qdev.conf.blk),
2335 dev->type == TYPE_DISK, errp)) {
2336 return;
2339 if (s->qdev.conf.discard_granularity == -1) {
2340 s->qdev.conf.discard_granularity =
2341 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2344 if (!s->version) {
2345 s->version = g_strdup(qemu_hw_version());
2347 if (!s->vendor) {
2348 s->vendor = g_strdup("QEMU");
2350 if (!s->device_id) {
2351 if (s->serial) {
2352 s->device_id = g_strdup_printf("%.20s", s->serial);
2353 } else {
2354 const char *str = blk_name(s->qdev.conf.blk);
2355 if (str && *str) {
2356 s->device_id = g_strdup(str);
2361 if (blk_is_sg(s->qdev.conf.blk)) {
2362 error_setg(errp, "unwanted /dev/sg*");
2363 return;
2366 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2367 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2368 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2369 } else {
2370 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2372 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2374 blk_iostatus_enable(s->qdev.conf.blk);
2377 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2379 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2380 AioContext *ctx = NULL;
2381 /* This can happen for devices without a drive. The error message for
2382 * the missing backend will be issued in scsi_realize.
2383 */
2384 if (s->qdev.conf.blk) {
2385 ctx = blk_get_aio_context(s->qdev.conf.blk);
2386 aio_context_acquire(ctx);
2387 blkconf_blocksizes(&s->qdev.conf);
2389 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2390 s->qdev.type = TYPE_DISK;
2391 if (!s->product) {
2392 s->product = g_strdup("QEMU HARDDISK");
2394 scsi_realize(&s->qdev, errp);
2395 if (ctx) {
2396 aio_context_release(ctx);
2400 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2402 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2403 AioContext *ctx;
2404 int ret;
2406 if (!dev->conf.blk) {
2407 /* Anonymous BlockBackend for an empty drive. As we put it into
2408 * dev->conf, qdev takes care of detaching on unplug. */
2409 dev->conf.blk = blk_new(0, BLK_PERM_ALL);
2410 ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
2411 assert(ret == 0);
2414 ctx = blk_get_aio_context(dev->conf.blk);
2415 aio_context_acquire(ctx);
2416 s->qdev.blocksize = 2048;
2417 s->qdev.type = TYPE_ROM;
2418 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2419 if (!s->product) {
2420 s->product = g_strdup("QEMU CD-ROM");
2422 scsi_realize(&s->qdev, errp);
2423 aio_context_release(ctx);
2426 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2428 DriveInfo *dinfo;
2429 Error *local_err = NULL;
2431 if (!dev->conf.blk) {
2432 scsi_realize(dev, &local_err);
2433 assert(local_err);
2434 error_propagate(errp, local_err);
2435 return;
2438 dinfo = blk_legacy_dinfo(dev->conf.blk);
2439 if (dinfo && dinfo->media_cd) {
2440 scsi_cd_realize(dev, errp);
2441 } else {
2442 scsi_hd_realize(dev, errp);
2446 static const SCSIReqOps scsi_disk_emulate_reqops = {
2447 .size = sizeof(SCSIDiskReq),
2448 .free_req = scsi_free_request,
2449 .send_command = scsi_disk_emulate_command,
2450 .read_data = scsi_disk_emulate_read_data,
2451 .write_data = scsi_disk_emulate_write_data,
2452 .get_buf = scsi_get_buf,
2455 static const SCSIReqOps scsi_disk_dma_reqops = {
2456 .size = sizeof(SCSIDiskReq),
2457 .free_req = scsi_free_request,
2458 .send_command = scsi_disk_dma_command,
2459 .read_data = scsi_read_data,
2460 .write_data = scsi_write_data,
2461 .get_buf = scsi_get_buf,
2462 .load_request = scsi_disk_load_request,
2463 .save_request = scsi_disk_save_request,
2466 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2467 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2468 [INQUIRY] = &scsi_disk_emulate_reqops,
2469 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2470 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2471 [START_STOP] = &scsi_disk_emulate_reqops,
2472 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2473 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2474 [READ_TOC] = &scsi_disk_emulate_reqops,
2475 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2476 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2477 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2478 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2479 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2480 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2481 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2482 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2483 [SEEK_10] = &scsi_disk_emulate_reqops,
2484 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2485 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2486 [UNMAP] = &scsi_disk_emulate_reqops,
2487 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2488 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2489 [VERIFY_10] = &scsi_disk_emulate_reqops,
2490 [VERIFY_12] = &scsi_disk_emulate_reqops,
2491 [VERIFY_16] = &scsi_disk_emulate_reqops,
2493 [READ_6] = &scsi_disk_dma_reqops,
2494 [READ_10] = &scsi_disk_dma_reqops,
2495 [READ_12] = &scsi_disk_dma_reqops,
2496 [READ_16] = &scsi_disk_dma_reqops,
2497 [WRITE_6] = &scsi_disk_dma_reqops,
2498 [WRITE_10] = &scsi_disk_dma_reqops,
2499 [WRITE_12] = &scsi_disk_dma_reqops,
2500 [WRITE_16] = &scsi_disk_dma_reqops,
2501 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2502 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2503 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
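/* Opcodes without an entry here fall back to scsi_disk_emulate_reqops
 * in scsi_new_request().
 */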
2506 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
2508 int i;
2509 int len = scsi_cdb_length(buf);
2510 char *line_buffer, *p;
2512 line_buffer = g_malloc(len * 5 + 1);
2514 for (i = 0, p = line_buffer; i < len; i++) {
2515 p += sprintf(p, " 0x%02x", buf[i]);
2517 trace_scsi_disk_new_request(lun, tag, line_buffer);
2519 g_free(line_buffer);
2522 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2523 uint8_t *buf, void *hba_private)
2525 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2526 SCSIRequest *req;
2527 const SCSIReqOps *ops;
2528 uint8_t command;
2530 command = buf[0];
2531 ops = scsi_disk_reqops_dispatch[command];
2532 if (!ops) {
2533 ops = &scsi_disk_emulate_reqops;
2535 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2537 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2538 scsi_disk_new_request_dump(lun, tag, buf);
2541 return req;
2544 #ifdef __linux__
2545 static int get_device_type(SCSIDiskState *s)
2547 uint8_t cmd[16];
2548 uint8_t buf[36];
2549 int ret;
2551 memset(cmd, 0, sizeof(cmd));
2552 memset(buf, 0, sizeof(buf));
2553 cmd[0] = INQUIRY;
2554 cmd[4] = sizeof(buf);
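/* cmd[4] is the INQUIRY allocation length. The reply is parsed below:
 * buf[0] is the peripheral device type and bit 7 of buf[1] (RMB) marks a
 * removable medium.
 */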
2556 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2557 buf, sizeof(buf));
2558 if (ret < 0) {
2559 return -1;
2561 s->qdev.type = buf[0];
2562 if (buf[1] & 0x80) {
2563 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2565 return 0;
2568 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2570 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2571 AioContext *ctx;
2572 int sg_version;
2573 int rc;
2575 if (!s->qdev.conf.blk) {
2576 error_setg(errp, "drive property not set");
2577 return;
2580 if (s->rotation_rate) {
2581 error_report_once("rotation_rate is specified for scsi-block but is "
2582 "not implemented. This option is deprecated and will "
2583 "be removed in a future version");
2586 ctx = blk_get_aio_context(s->qdev.conf.blk);
2587 aio_context_acquire(ctx);
2589 /* check we are using a driver managing SG_IO (version 3 and after) */
2590 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2591 if (rc < 0) {
2592 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2593 if (rc != -EPERM) {
2594 error_append_hint(errp, "Is this a SCSI device?\n");
2596 goto out;
2598 if (sg_version < 30000) {
2599 error_setg(errp, "scsi generic interface too old");
2600 goto out;
2603 /* get device type from INQUIRY data */
2604 rc = get_device_type(s);
2605 if (rc < 0) {
2606 error_setg(errp, "INQUIRY failed");
2607 goto out;
2610 /* Make a guess for the block size; we'll fix it when the guest sends
2611 * READ CAPACITY. If it doesn't, it would likely assume these sizes
2612 * anyway. (TODO: check in /sys).
2613 */
2614 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2615 s->qdev.blocksize = 2048;
2616 } else {
2617 s->qdev.blocksize = 512;
2620 /* Prevent the scsi-block device from being removed with the HMP and QMP
2621 * eject commands.
2622 */
2623 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2625 scsi_realize(&s->qdev, errp);
2626 scsi_generic_read_device_inquiry(&s->qdev);
2628 out:
2629 aio_context_release(ctx);
2632 typedef struct SCSIBlockReq {
2633 SCSIDiskReq req;
2634 sg_io_hdr_t io_header;
2636 /* Selected bytes of the original CDB, copied into our own CDB. */
2637 uint8_t cmd, cdb1, group_number;
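/* For 10-, 12- and 16-byte guest CDBs, cdb1 preserves byte 1 (the
 * protection/DPO/FUA bits) and group_number the GROUP NUMBER field;
 * scsi_block_do_sgio copies both verbatim into the CDB it rebuilds.
 */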
2639 /* CDB passed to SG_IO. */
2640 uint8_t cdb[16];
2641 } SCSIBlockReq;
2643 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2644 int64_t offset, QEMUIOVector *iov,
2645 int direction,
2646 BlockCompletionFunc *cb, void *opaque)
2648 sg_io_hdr_t *io_header = &req->io_header;
2649 SCSIDiskReq *r = &req->req;
2650 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2651 int nb_logical_blocks;
2652 uint64_t lba;
2653 BlockAIOCB *aiocb;
2655 /* This is not supported yet. It can only happen if the guest does
2656 * reads and writes that are not aligned to one logical sector
2657 * _and_ cover multiple MemoryRegions.
2658 */
2659 assert(offset % s->qdev.blocksize == 0);
2660 assert(iov->size % s->qdev.blocksize == 0);
2662 io_header->interface_id = 'S';
2664 /* The data transfer comes from the QEMUIOVector. */
2665 io_header->dxfer_direction = direction;
2666 io_header->dxfer_len = iov->size;
2667 io_header->dxferp = (void *)iov->iov;
2668 io_header->iovec_count = iov->niov;
2669 assert(io_header->iovec_count == iov->niov); /* no overflow! */
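/* iovec_count is an unsigned short in sg_io_hdr_t; the assertion guards
 * against a truncated iovec count.
 */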
2671 /* Build a new CDB with the LBA and length patched in, in case the
2672 * DMA helpers split the transfer into multiple segments. Do not
2673 * build a CDB smaller than what the guest wanted, and only build
2674 * a larger one if strictly necessary.
2675 */
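/* For example, a READ(6) fragment whose LBA exceeds 0x1ffff is rebuilt as
 * READ(10) (opcode 0x08 becomes 0x28), while a guest 16-byte CDB is always
 * rebuilt as a 16-byte CDB.
 */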
2676 io_header->cmdp = req->cdb;
2677 lba = offset / s->qdev.blocksize;
2678 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2680 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2681 /* 6-byte CDB */
2682 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2683 req->cdb[4] = nb_logical_blocks;
2684 req->cdb[5] = 0;
2685 io_header->cmd_len = 6;
2686 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2687 /* 10-byte CDB */
2688 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2689 req->cdb[1] = req->cdb1;
2690 stl_be_p(&req->cdb[2], lba);
2691 req->cdb[6] = req->group_number;
2692 stw_be_p(&req->cdb[7], nb_logical_blocks);
2693 req->cdb[9] = 0;
2694 io_header->cmd_len = 10;
2695 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2696 /* 12-byte CDB */
2697 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2698 req->cdb[1] = req->cdb1;
2699 stl_be_p(&req->cdb[2], lba);
2700 stl_be_p(&req->cdb[6], nb_logical_blocks);
2701 req->cdb[10] = req->group_number;
2702 req->cdb[11] = 0;
2703 io_header->cmd_len = 12;
2704 } else {
2705 /* 16-byte CDB */
2706 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2707 req->cdb[1] = req->cdb1;
2708 stq_be_p(&req->cdb[2], lba);
2709 stl_be_p(&req->cdb[10], nb_logical_blocks);
2710 req->cdb[14] = req->group_number;
2711 req->cdb[15] = 0;
2712 io_header->cmd_len = 16;
2715 /* The rest is as in scsi-generic.c. */
2716 io_header->mx_sb_len = sizeof(r->req.sense);
2717 io_header->sbp = r->req.sense;
2718 io_header->timeout = UINT_MAX;
2719 io_header->usr_ptr = r;
2720 io_header->flags |= SG_FLAG_DIRECT_IO;
2722 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2723 assert(aiocb != NULL);
2724 return aiocb;
2727 static bool scsi_block_no_fua(SCSICommand *cmd)
2729 return false;
2732 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2733 QEMUIOVector *iov,
2734 BlockCompletionFunc *cb, void *cb_opaque,
2735 void *opaque)
2737 SCSIBlockReq *r = opaque;
2738 return scsi_block_do_sgio(r, offset, iov,
2739 SG_DXFER_FROM_DEV, cb, cb_opaque);
2742 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2743 QEMUIOVector *iov,
2744 BlockCompletionFunc *cb, void *cb_opaque,
2745 void *opaque)
2747 SCSIBlockReq *r = opaque;
2748 return scsi_block_do_sgio(r, offset, iov,
2749 SG_DXFER_TO_DEV, cb, cb_opaque);
2752 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2754 switch (buf[0]) {
2755 case VERIFY_10:
2756 case VERIFY_12:
2757 case VERIFY_16:
2758 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2759 * for the number of logical blocks specified in the length
2760 * field). For other modes, do not use scatter/gather operation.
2761 */
2762 if ((buf[1] & 6) == 2) {
2763 return false;
2765 break;
2767 case READ_6:
2768 case READ_10:
2769 case READ_12:
2770 case READ_16:
2771 case WRITE_6:
2772 case WRITE_10:
2773 case WRITE_12:
2774 case WRITE_16:
2775 case WRITE_VERIFY_10:
2776 case WRITE_VERIFY_12:
2777 case WRITE_VERIFY_16:
2778 /* MMC writing cannot be done via DMA helpers, because it sometimes
2779 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2780 * We might use scsi_block_dma_reqops as long as no writing commands are
2781 * seen, but performance usually isn't paramount on optical media. So,
2782 * just make scsi-block operate the same as scsi-generic for them.
2783 */
2784 if (s->qdev.type != TYPE_ROM) {
2785 return false;
2787 break;
2789 default:
2790 break;
2793 return true;
2797 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2799 SCSIBlockReq *r = (SCSIBlockReq *)req;
2800 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2802 r->cmd = req->cmd.buf[0];
2803 switch (r->cmd >> 5) {
2804 case 0:
2805 /* 6-byte CDB. */
2806 r->cdb1 = r->group_number = 0;
2807 break;
2808 case 1:
2809 /* 10-byte CDB. */
2810 r->cdb1 = req->cmd.buf[1];
2811 r->group_number = req->cmd.buf[6];
2812 break;
2813 case 4:
2814 /* 12-byte CDB. */
2815 r->cdb1 = req->cmd.buf[1];
2816 r->group_number = req->cmd.buf[10];
2817 break;
2818 case 5:
2819 /* 16-byte CDB. */
2820 r->cdb1 = req->cmd.buf[1];
2821 r->group_number = req->cmd.buf[14];
2822 break;
2823 default:
2824 abort();
2827 /* Protection information is not supported. For SCSI versions 2 and
2828 * older (as determined by snooping the guest's INQUIRY commands),
2829 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2830 */
2831 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
2832 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2833 return 0;
2836 r->req.status = &r->io_header.status;
2837 return scsi_disk_dma_command(req, buf);
2840 static const SCSIReqOps scsi_block_dma_reqops = {
2841 .size = sizeof(SCSIBlockReq),
2842 .free_req = scsi_free_request,
2843 .send_command = scsi_block_dma_command,
2844 .read_data = scsi_read_data,
2845 .write_data = scsi_write_data,
2846 .get_buf = scsi_get_buf,
2847 .load_request = scsi_disk_load_request,
2848 .save_request = scsi_disk_save_request,
2851 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2852 uint32_t lun, uint8_t *buf,
2853 void *hba_private)
2855 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2857 if (scsi_block_is_passthrough(s, buf)) {
2858 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2859 hba_private);
2860 } else {
2861 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2862 hba_private);
2866 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2867 uint8_t *buf, void *hba_private)
2869 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2871 if (scsi_block_is_passthrough(s, buf)) {
2872 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2873 } else {
2874 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2878 #endif
2880 static
2881 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2882 BlockCompletionFunc *cb, void *cb_opaque,
2883 void *opaque)
2885 SCSIDiskReq *r = opaque;
2886 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2887 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2890 static
2891 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2892 BlockCompletionFunc *cb, void *cb_opaque,
2893 void *opaque)
2895 SCSIDiskReq *r = opaque;
2896 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2897 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2900 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2902 DeviceClass *dc = DEVICE_CLASS(klass);
2903 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2905 dc->fw_name = "disk";
2906 dc->reset = scsi_disk_reset;
2907 sdc->dma_readv = scsi_dma_readv;
2908 sdc->dma_writev = scsi_dma_writev;
2909 sdc->need_fua_emulation = scsi_is_cmd_fua;
2912 static const TypeInfo scsi_disk_base_info = {
2913 .name = TYPE_SCSI_DISK_BASE,
2914 .parent = TYPE_SCSI_DEVICE,
2915 .class_init = scsi_disk_base_class_initfn,
2916 .instance_size = sizeof(SCSIDiskState),
2917 .class_size = sizeof(SCSIDiskClass),
2918 .abstract = true,
2921 #define DEFINE_SCSI_DISK_PROPERTIES() \
2922 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
2923 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2924 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2925 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2926 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2927 DEFINE_PROP_STRING("product", SCSIDiskState, product), \
2928 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
2931 static Property scsi_hd_properties[] = {
2932 DEFINE_SCSI_DISK_PROPERTIES(),
2933 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2934 SCSI_DISK_F_REMOVABLE, false),
2935 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2936 SCSI_DISK_F_DPOFUA, false),
2937 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2938 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2939 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2940 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2941 DEFAULT_MAX_UNMAP_SIZE),
2942 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2943 DEFAULT_MAX_IO_SIZE),
2944 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
2945 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
2946 5),
2947 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
2948 DEFINE_PROP_END_OF_LIST(),
2951 static const VMStateDescription vmstate_scsi_disk_state = {
2952 .name = "scsi-disk",
2953 .version_id = 1,
2954 .minimum_version_id = 1,
2955 .fields = (VMStateField[]) {
2956 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
2957 VMSTATE_BOOL(media_changed, SCSIDiskState),
2958 VMSTATE_BOOL(media_event, SCSIDiskState),
2959 VMSTATE_BOOL(eject_request, SCSIDiskState),
2960 VMSTATE_BOOL(tray_open, SCSIDiskState),
2961 VMSTATE_BOOL(tray_locked, SCSIDiskState),
2962 VMSTATE_END_OF_LIST()
2966 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
2968 DeviceClass *dc = DEVICE_CLASS(klass);
2969 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2971 sc->realize = scsi_hd_realize;
2972 sc->alloc_req = scsi_new_request;
2973 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2974 dc->desc = "virtual SCSI disk";
2975 dc->props = scsi_hd_properties;
2976 dc->vmsd = &vmstate_scsi_disk_state;
2979 static const TypeInfo scsi_hd_info = {
2980 .name = "scsi-hd",
2981 .parent = TYPE_SCSI_DISK_BASE,
2982 .class_init = scsi_hd_class_initfn,
2985 static Property scsi_cd_properties[] = {
2986 DEFINE_SCSI_DISK_PROPERTIES(),
2987 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2988 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2989 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2990 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2991 DEFAULT_MAX_IO_SIZE),
2992 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
2993 5),
2994 DEFINE_PROP_END_OF_LIST(),
2997 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
2999 DeviceClass *dc = DEVICE_CLASS(klass);
3000 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3002 sc->realize = scsi_cd_realize;
3003 sc->alloc_req = scsi_new_request;
3004 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3005 dc->desc = "virtual SCSI CD-ROM";
3006 dc->props = scsi_cd_properties;
3007 dc->vmsd = &vmstate_scsi_disk_state;
3010 static const TypeInfo scsi_cd_info = {
3011 .name = "scsi-cd",
3012 .parent = TYPE_SCSI_DISK_BASE,
3013 .class_init = scsi_cd_class_initfn,
3016 #ifdef __linux__
3017 static Property scsi_block_properties[] = {
3018 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
3019 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
3020 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
3021 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3022 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3023 DEFAULT_MAX_UNMAP_SIZE),
3024 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3025 DEFAULT_MAX_IO_SIZE),
3026 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3027 -1),
3028 DEFINE_PROP_END_OF_LIST(),
3031 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3033 DeviceClass *dc = DEVICE_CLASS(klass);
3034 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3035 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3037 sc->realize = scsi_block_realize;
3038 sc->alloc_req = scsi_block_new_request;
3039 sc->parse_cdb = scsi_block_parse_cdb;
3040 sdc->dma_readv = scsi_block_dma_readv;
3041 sdc->dma_writev = scsi_block_dma_writev;
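/* scsi-block rebuilds the guest CDB (including any FUA bit) and passes it
 * to the real device via SG_IO, so FUA never needs to be emulated.
 */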
3042 sdc->need_fua_emulation = scsi_block_no_fua;
3043 dc->desc = "SCSI block device passthrough";
3044 dc->props = scsi_block_properties;
3045 dc->vmsd = &vmstate_scsi_disk_state;
3048 static const TypeInfo scsi_block_info = {
3049 .name = "scsi-block",
3050 .parent = TYPE_SCSI_DISK_BASE,
3051 .class_init = scsi_block_class_initfn,
3053 #endif
3055 static Property scsi_disk_properties[] = {
3056 DEFINE_SCSI_DISK_PROPERTIES(),
3057 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3058 SCSI_DISK_F_REMOVABLE, false),
3059 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3060 SCSI_DISK_F_DPOFUA, false),
3061 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3062 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3063 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3064 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3065 DEFAULT_MAX_UNMAP_SIZE),
3066 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3067 DEFAULT_MAX_IO_SIZE),
3068 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3069 5),
3070 DEFINE_PROP_END_OF_LIST(),
3073 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
3075 DeviceClass *dc = DEVICE_CLASS(klass);
3076 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3078 sc->realize = scsi_disk_realize;
3079 sc->alloc_req = scsi_new_request;
3080 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3081 dc->fw_name = "disk";
3082 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
3083 dc->reset = scsi_disk_reset;
3084 dc->props = scsi_disk_properties;
3085 dc->vmsd = &vmstate_scsi_disk_state;
3088 static const TypeInfo scsi_disk_info = {
3089 .name = "scsi-disk",
3090 .parent = TYPE_SCSI_DISK_BASE,
3091 .class_init = scsi_disk_class_initfn,
3094 static void scsi_disk_register_types(void)
3096 type_register_static(&scsi_disk_base_info);
3097 type_register_static(&scsi_hd_info);
3098 type_register_static(&scsi_cd_info);
3099 #ifdef __linux__
3100 type_register_static(&scsi_block_info);
3101 #endif
3102 type_register_static(&scsi_disk_info);
3105 type_init(scsi_disk_register_types)