/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif
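
/*
 * Per-request state for SG_IO passthrough: the generic SCSIRequest plus
 * the bounce buffer used for the data transfer and the sg_io_hdr that is
 * handed to the host kernel.
 */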
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;
    int buflen;
    int len;
    sg_io_hdr_t io_header;
} SCSIGenericReq;
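
/*
 * Migration save/load for in-flight requests: the buffer length is always
 * transferred; the buffer contents are included only for host-bound
 * (SCSI_XFER_TO_DEV) transfers.
 */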
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion. */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;
    sg_io_hdr_t *io_hdr = &r->io_header;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->host_status != SCSI_HOST_OK) {
        scsi_req_complete_failed(&r->req, io_hdr->host_status);
        goto done;
    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
        status = BUSY;
    } else {
        status = io_hdr->status;
        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = io_hdr->sb_len_wr;
        }
    }
    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}
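
/*
 * Completion callback for blk_aio_ioctl(): clear the in-flight AIOCB and
 * finish the request.
 */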
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    scsi_command_complete_noio(r, ret);
}
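
/*
 * Fill in the sg_io_hdr for this request and submit it as an asynchronous
 * SG_IO ioctl on the block backend; @complete runs when the ioctl finishes.
 */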
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = s->io_timeout * 1000;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}
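
/*
 * Host transfer limit for this device, in units of s->blocksize, derived
 * from the backend's maximum hardware transfer size and I/O vector count.
 */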
static uint64_t calculate_max_transfer(SCSIDevice *s)
{
    uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk);
    uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);

    assert(max_transfer);
    max_transfer = MIN_NON_ZERO(max_transfer,
                                max_iov * qemu_real_host_page_size());

    return max_transfer / s->blocksize;
}
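
/*
 * Post-process an INQUIRY reply: record the device's SCSI version from the
 * standard INQUIRY data, clamp the Block Limits VPD page (0xb0) transfer
 * lengths to what the host allows, and, when Block Limits emulation is in
 * use, advertise page 0xb0 in the Supported VPD Pages list (page 0x00).
 */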
static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, the first 3 bits of byte 2 are the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0 && r->buflen >= 8) {
            uint8_t buf[16] = {};
            uint8_t buf_used = MIN(r->buflen, 16);
            uint64_t max_transfer = calculate_max_transfer(s);

            memcpy(buf, r->buf, buf_used);
            stl_be_p(&buf[8], max_transfer);
            stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12])));
            memcpy(r->buf + 8, buf + 8, buf_used - 8);

        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);

            if (len < r->buflen) {
                len++;
            }
        }
    }

    return len;
}

static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = calculate_max_transfer(s),
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

static void scsi_read_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        return;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        len = scsi_handle_inquiry_reply(r, s, len);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

static void scsi_write_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        return;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);
}

/* Write data to a scsi device.  The transfer may complete asynchronously. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer. */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}
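
/*
 * Extract a 64-bit NAA-based WWN from a Device Identification (VPD page
 * 0x83) designator: either a binary NAA designator (type 3, 8 bytes) or a
 * SCSI name string of the form "naa.<16 hex digits>" (type 8).  Returns 0
 * on success, -EINVAL otherwise.
 */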
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}
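
/*
 * Synchronous SG_IO helper for data-in commands: returns 0 on success and
 * -1 on any transport, driver or target error.  Used below to probe the
 * device at realize time.
 */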
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x00;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x83;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}
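
/*
 * Issue MODE SENSE(6) to a stream (tape) device and return the block length
 * from the mode parameter block descriptor, or -1 on failure.
 */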
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}
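
/*
 * Realize: validate the backing drive and its error policy, check that the
 * host node supports SG_IO version 3 or later, read the device type via
 * SG_GET_SCSI_ID, pick an initial block size, and probe INQUIRY data for
 * WWNs and VPD Block Limits support.
 */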
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY.  If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
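
/*
 * Illustrative use (not taken from this file): a scsi-generic device is
 * typically backed by a host /dev/sg* node and attached to a SCSI
 * controller, e.g.:
 *
 *   -blockdev node-name=sg0,driver=host_device,filename=/dev/sg0 \
 *   -device virtio-scsi-pci,id=scsi0 \
 *   -device scsi-generic,bus=scsi0.0,drive=sg0,io_timeout=130
 *
 * The option names correspond to the properties defined above ("drive",
 * "share-rw", "io_timeout"); the node name and timeout value here are
 * arbitrary examples.
 */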

static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, size_t buf_len,
                                  void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */